
Introduce Task Results

* Added Retention Option to specify retention TTL for tasks
* Added ResultWriter as a client interface to write result data for the associated task
Ken Hibino 2021-11-05 16:52:54 -07:00
parent 4638405cbd
commit f4ddac4dcc
33 changed files with 2099 additions and 846 deletions
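Before the file-by-file diff, a minimal end-to-end sketch of how the two features are intended to be used together (the task type, payload, handler name, and Redis address are illustrative placeholders, not part of this commit):

```go
package main

import (
	"context"
	"encoding/json"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// Enqueue side: ask for the task to be retained for 24h after it completes.
func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("email:welcome", []byte(`{"user_id":42}`))
	info, err := client.Enqueue(task, asynq.Retention(24*time.Hour))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("enqueued %s (retention %v)", info.ID, info.Retention)
}

// Worker side: a handler (registered with a Server/ServeMux elsewhere) can
// record a result for the retained task via the new ResultWriter.
func handleWelcomeEmail(ctx context.Context, t *asynq.Task) error {
	res, err := json.Marshal(map[string]string{"status": "sent"})
	if err != nil {
		return err
	}
	if w := t.ResultWriter(); w != nil { // nil for tasks not delivered by the server
		if _, err := w.Write(res); err != nil {
			return err
		}
	}
	return nil
}
```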


@@ -7,7 +7,7 @@ jobs:
    strategy:
      matrix:
        os: [ubuntu-latest]
-        go-version: [1.13.x, 1.14.x, 1.15.x, 1.16.x]
+        go-version: [1.14.x, 1.15.x, 1.16.x, 1.17.x]
    runs-on: ${{ matrix.os }}
    services:
      redis:


@@ -10,11 +10,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- `NewTask` takes `Option` as variadic argument
+- Bumped minimum supported go version to 1.14 (i.e. go1.14 or higher is required).
### Added
+- `Retention` option is added to allow user to specify task retention duration after completion.
- `TaskID` option is added to allow user to specify task ID.
- `ErrTaskIDConflict` sentinel error value is added.
+- `ResultWriter` type is added and provided through `Task.ResultWriter` method.
+- `TaskInfo` has new fields `CompletedAt`, `Result` and `Retention`.
### Removed


@@ -49,7 +49,7 @@ Task queues are used as a mechanism to distribute work across multiple machines.
## Quickstart
-Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.13` or higher is required.
+Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.14` or higher is required.
Initialize your project by creating a folder and then running `go mod init github.com/your/repo` ([learn more](https://blog.golang.org/using-go-modules)) inside the folder. Then install Asynq library with the [`go get`](https://golang.org/cmd/go/#hdr-Add_dependencies_to_current_module_and_install_them) command:


@@ -5,6 +5,7 @@
package asynq

import (
+	"context"
	"crypto/tls"
	"fmt"
	"net/url"
@@ -26,11 +27,20 @@ type Task struct {
	// opts holds options for the task.
	opts []Option

+	// w is the ResultWriter for the task.
+	w *ResultWriter
}

func (t *Task) Type() string    { return t.typename }
func (t *Task) Payload() []byte { return t.payload }

+// ResultWriter returns a pointer to the ResultWriter associated with the task.
+//
+// Nil pointer is returned if called on a newly created task (i.e. task created by calling NewTask).
+// Only the tasks passed to Handler.ProcessTask have a valid ResultWriter pointer.
+func (t *Task) ResultWriter() *ResultWriter { return t.w }
+
// NewTask returns a new Task given a type name and payload data.
// Options can be passed to configure task processing behavior.
func NewTask(typename string, payload []byte, opts ...Option) *Task {
@@ -41,6 +51,15 @@ func NewTask(typename string, payload []byte, opts ...Option) *Task {
	}
}

+// newTask creates a task with the given typename, payload and ResultWriter.
+func newTask(typename string, payload []byte, w *ResultWriter) *Task {
+	return &Task{
+		typename: typename,
+		payload:  payload,
+		w:        w,
+	}
+}
+
// A TaskInfo describes a task and its metadata.
type TaskInfo struct {
	// ID is the identifier of the task.
@@ -81,9 +100,29 @@ type TaskInfo struct {
	// NextProcessAt is the time the task is scheduled to be processed,
	// zero if not applicable.
	NextProcessAt time.Time

+	// Retention is duration of the retention period after the task is successfully processed.
+	Retention time.Duration
+
+	// CompletedAt is the time when the task is processed successfully.
+	// Zero value (i.e. time.Time{}) indicates no value.
+	CompletedAt time.Time
+
+	// Result holds the result data associated with the task.
+	// Use ResultWriter to write result data from the Handler.
+	Result []byte
}

-func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time) *TaskInfo {
+// If t is non-zero, returns time converted from t as unix time in seconds.
+// If t is zero, returns zero value of time.Time.
+func fromUnixTimeOrZero(t int64) time.Time {
+	if t == 0 {
+		return time.Time{}
+	}
+	return time.Unix(t, 0)
+}
+
+func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time, result []byte) *TaskInfo {
	info := TaskInfo{
		ID:    msg.ID,
		Queue: msg.Queue,
@@ -93,18 +132,12 @@ func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time
		Retried:       msg.Retried,
		LastErr:       msg.ErrorMsg,
		Timeout:       time.Duration(msg.Timeout) * time.Second,
+		Deadline:      fromUnixTimeOrZero(msg.Deadline),
+		Retention:     time.Duration(msg.Retention) * time.Second,
		NextProcessAt: nextProcessAt,
+		LastFailedAt:  fromUnixTimeOrZero(msg.LastFailedAt),
+		CompletedAt:   fromUnixTimeOrZero(msg.CompletedAt),
+		Result:        result,
	}
-	if msg.LastFailedAt == 0 {
-		info.LastFailedAt = time.Time{}
-	} else {
-		info.LastFailedAt = time.Unix(msg.LastFailedAt, 0)
-	}
-	if msg.Deadline == 0 {
-		info.Deadline = time.Time{}
-	} else {
-		info.Deadline = time.Unix(msg.Deadline, 0)
-	}

	switch state {
@@ -118,6 +151,8 @@ func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time
		info.State = TaskStateRetry
	case base.TaskStateArchived:
		info.State = TaskStateArchived
+	case base.TaskStateCompleted:
+		info.State = TaskStateCompleted
	default:
		panic(fmt.Sprintf("internal error: unknown state: %d", state))
	}
@@ -142,6 +177,9 @@ (
	// Indicates that the task is archived and stored for inspection purposes.
	TaskStateArchived

+	// Indicates that the task is processed successfully and retained until the retention TTL expires.
+	TaskStateCompleted
)

func (s TaskState) String() string {
@@ -156,6 +194,8 @@ func (s TaskState) String() string {
		return "retry"
	case TaskStateArchived:
		return "archived"
+	case TaskStateCompleted:
+		return "completed"
	}
	panic("asynq: unknown task state")
}
@@ -440,3 +480,27 @@ func parseRedisSentinelURI(u *url.URL) (RedisConnOpt, error) {
	}
	return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, Password: password}, nil
}
+
+// ResultWriter is a client interface to write result data for a task.
+// It writes the data to the redis instance the server is connected to.
+type ResultWriter struct {
+	id     string // task ID this writer is responsible for
+	qname  string // queue name the task belongs to
+	broker base.Broker
+	ctx    context.Context // context associated with the task
+}
+
+// Write writes the given data as a result of the task the ResultWriter is associated with.
+func (w *ResultWriter) Write(data []byte) (n int, err error) {
+	select {
+	case <-w.ctx.Done():
+		return 0, fmt.Errorf("failed to write task result: %v", w.ctx.Err())
+	default:
+	}
+	return w.broker.WriteResult(w.qname, w.id, data)
+}
+
+// TaskID returns the ID of the task the ResultWriter is associated with.
+func (w *ResultWriter) TaskID() string {
+	return w.id
+}
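As a usage sketch of the writer defined above (the handler name, payload shape, and result fields are hypothetical, not part of the commit), a Handler can persist a small result payload through Task.ResultWriter; the nil check keeps the handler usable with plain NewTask values in tests:

```go
package worker

import (
	"context"
	"encoding/json"

	"github.com/hibiken/asynq"
)

// HandleImageResize is a hypothetical handler. Only tasks delivered by the
// server carry a non-nil ResultWriter, so guard against nil before writing.
func HandleImageResize(ctx context.Context, t *asynq.Task) error {
	// ... perform the resize work ...

	out, err := json.Marshal(map[string]string{"thumbnail": "s3://bucket/thumb.png"})
	if err != nil {
		return err
	}
	if w := t.ResultWriter(); w != nil {
		if _, err := w.Write(out); err != nil {
			// Write fails once the task's context has been canceled.
			return err
		}
	}
	return nil
}
```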


@@ -46,6 +46,7 @@ (
	ProcessAtOpt
	ProcessInOpt
	TaskIDOpt
+	RetentionOpt
)

// Option specifies the task processing behavior.
@@ -70,6 +71,7 @@ type (
	uniqueOption    time.Duration
	processAtOption time.Time
	processInOption time.Duration
+	retentionOption time.Duration
)

// MaxRetry returns an option to specify the max number of times
@@ -178,6 +180,17 @@ func (d processInOption) String() string { return fmt.Sprintf("ProcessIn(%v)
func (d processInOption) Type() OptionType   { return ProcessInOpt }
func (d processInOption) Value() interface{} { return time.Duration(d) }

+// Retention returns an option to specify the duration of retention period for the task.
+// If this option is provided, the task will be stored as a completed task after successful processing.
+// A completed task will be deleted after the specified duration elapses.
+func Retention(d time.Duration) Option {
+	return retentionOption(d)
+}
+
+func (ttl retentionOption) String() string     { return fmt.Sprintf("Retention(%v)", time.Duration(ttl)) }
+func (ttl retentionOption) Type() OptionType   { return RetentionOpt }
+func (ttl retentionOption) Value() interface{} { return time.Duration(ttl) }
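For example (a rough sketch; the task type and payload are made up), the option can be attached when the task is created — options given to NewTask are carried in the task's opts field shown earlier and picked up when the task is enqueued — or passed directly to Enqueue as in the overview above:

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Retention keeps the task (and any result written by its handler) stored
	// as a completed task for 24 hours after successful processing.
	task := asynq.NewTask("report:generate", []byte(`{"month":"2021-10"}`), asynq.Retention(24*time.Hour))

	info, err := client.Enqueue(task)
	if err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	log.Printf("enqueued %s: state=%v retention=%v", info.ID, info.State, info.Retention)
}
```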
// ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
//
// ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
@@ -196,6 +209,7 @@ type option struct {
	deadline  time.Time
	uniqueTTL time.Duration
	processAt time.Time
+	retention time.Duration
}

// composeOptions merges user provided options into the default options
@@ -237,6 +251,8 @@ func composeOptions(opts ...Option) (option, error) {
			res.processAt = time.Time(opt)
		case processInOption:
			res.processAt = time.Now().Add(time.Duration(opt))
+		case retentionOption:
+			res.retention = time.Duration(opt)
		default:
			// ignore unexpected option
		}
@@ -316,6 +332,7 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
		Deadline:  deadline.Unix(),
		Timeout:   int64(timeout.Seconds()),
		UniqueKey: uniqueKey,
+		Retention: int64(opt.retention.Seconds()),
	}
	now := time.Now()
	var state base.TaskState
@@ -335,7 +352,7 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
	case err != nil:
		return nil, err
	}
-	return newTaskInfo(msg, state, opt.processAt), nil
+	return newTaskInfo(msg, state, opt.processAt, nil), nil
}

func (c *Client) enqueue(msg *base.TaskMessage, uniqueTTL time.Duration) error {


@ -416,6 +416,40 @@ func TestClientEnqueue(t *testing.T) {
}, },
}, },
}, },
{
desc: "With Retention option",
task: task,
opts: []Option{
Retention(24 * time.Hour),
},
wantInfo: &TaskInfo{
Queue: "default",
Type: task.Type(),
Payload: task.Payload(),
State: TaskStatePending,
MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
Retention: 24 * time.Hour,
},
wantPending: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type(),
Payload: task.Payload(),
Retry: defaultMaxRetry,
Queue: "default",
Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(),
Retention: int64((24 * time.Hour).Seconds()),
},
},
},
},
} }
for _, tc := range tests { for _, tc := range tests {

go.sum

@ -1,22 +1,30 @@
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-redis/redis/v8 v8.11.2 h1:WqlSpAwz8mxDSMCvbyz1Mkiqe0LE5OY4j3lgkvu1Ts0= github.com/go-redis/redis/v8 v8.11.2 h1:WqlSpAwz8mxDSMCvbyz1Mkiqe0LE5OY4j3lgkvu1Ts0=
github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M= github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -37,9 +45,11 @@ github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@ -55,25 +65,32 @@ github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4= go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -86,10 +103,12 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -113,6 +132,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -120,12 +140,15 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -140,6 +163,7 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@ -149,4 +173,5 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -66,6 +66,8 @@ type QueueInfo struct {
	Retry int
	// Number of archived tasks.
	Archived int
+	// Number of stored completed tasks.
+	Completed int

	// Total number of tasks being processed during the given date.
	// The number includes both succeeded and failed tasks.
@@ -99,6 +101,7 @@ func (i *Inspector) GetQueueInfo(qname string) (*QueueInfo, error) {
		Scheduled: stats.Scheduled,
		Retry:     stats.Retry,
		Archived:  stats.Archived,
+		Completed: stats.Completed,
		Processed: stats.Processed,
		Failed:    stats.Failed,
		Paused:    stats.Paused,
@@ -186,7 +189,7 @@ func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) {
	case err != nil:
		return nil, fmt.Errorf("asynq: %v", err)
	}
-	return newTaskInfo(info.Message, info.State, info.NextProcessAt), nil
+	return newTaskInfo(info.Message, info.State, info.NextProcessAt, info.Result), nil
}

// ListOption specifies behavior of list operation.
@@ -259,17 +262,21 @@ func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*TaskI
	}
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
-	msgs, err := i.rdb.ListPending(qname, pgn)
+	infos, err := i.rdb.ListPending(qname, pgn)
	switch {
	case errors.IsQueueNotFound(err):
		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
	case err != nil:
		return nil, fmt.Errorf("asynq: %v", err)
	}
-	now := time.Now()
	var tasks []*TaskInfo
-	for _, m := range msgs {
-		tasks = append(tasks, newTaskInfo(m, base.TaskStatePending, now))
+	for _, i := range infos {
+		tasks = append(tasks, newTaskInfo(
+			i.Message,
+			i.State,
+			i.NextProcessAt,
+			i.Result,
+		))
	}
	return tasks, err
}
@@ -283,7 +290,7 @@ func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskIn
	}
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
-	msgs, err := i.rdb.ListActive(qname, pgn)
+	infos, err := i.rdb.ListActive(qname, pgn)
	switch {
	case errors.IsQueueNotFound(err):
		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -291,8 +298,13 @@ func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskIn
		return nil, fmt.Errorf("asynq: %v", err)
	}
	var tasks []*TaskInfo
-	for _, m := range msgs {
-		tasks = append(tasks, newTaskInfo(m, base.TaskStateActive, time.Time{}))
+	for _, i := range infos {
+		tasks = append(tasks, newTaskInfo(
+			i.Message,
+			i.State,
+			i.NextProcessAt,
+			i.Result,
+		))
	}
	return tasks, err
}
@@ -307,7 +319,7 @@ func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*Tas
	}
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
-	zs, err := i.rdb.ListScheduled(qname, pgn)
+	infos, err := i.rdb.ListScheduled(qname, pgn)
	switch {
	case errors.IsQueueNotFound(err):
		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -315,11 +327,12 @@ func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*Tas
		return nil, fmt.Errorf("asynq: %v", err)
	}
	var tasks []*TaskInfo
-	for _, z := range zs {
+	for _, i := range infos {
		tasks = append(tasks, newTaskInfo(
-			z.Message,
-			base.TaskStateScheduled,
-			time.Unix(z.Score, 0),
+			i.Message,
+			i.State,
+			i.NextProcessAt,
+			i.Result,
		))
	}
	return tasks, nil
@@ -335,7 +348,7 @@ func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInf
	}
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
-	zs, err := i.rdb.ListRetry(qname, pgn)
+	infos, err := i.rdb.ListRetry(qname, pgn)
	switch {
	case errors.IsQueueNotFound(err):
		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -343,11 +356,12 @@ func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInf
		return nil, fmt.Errorf("asynq: %v", err)
	}
	var tasks []*TaskInfo
-	for _, z := range zs {
+	for _, i := range infos {
		tasks = append(tasks, newTaskInfo(
-			z.Message,
-			base.TaskStateRetry,
-			time.Unix(z.Score, 0),
+			i.Message,
+			i.State,
+			i.NextProcessAt,
+			i.Result,
		))
	}
	return tasks, nil
@@ -363,7 +377,7 @@ func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*Task
	}
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
-	zs, err := i.rdb.ListArchived(qname, pgn)
+	infos, err := i.rdb.ListArchived(qname, pgn)
	switch {
	case errors.IsQueueNotFound(err):
		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -371,11 +385,41 @@ func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*Task
		return nil, fmt.Errorf("asynq: %v", err)
	}
	var tasks []*TaskInfo
-	for _, z := range zs {
+	for _, i := range infos {
		tasks = append(tasks, newTaskInfo(
-			z.Message,
-			base.TaskStateArchived,
-			time.Time{},
+			i.Message,
+			i.State,
+			i.NextProcessAt,
+			i.Result,
		))
	}
	return tasks, nil
}
+
+// ListCompletedTasks retrieves completed tasks from the specified queue.
+// Tasks are sorted by expiration time (i.e. CompletedAt + Retention) in descending order.
+//
+// By default, it retrieves the first 30 tasks.
+func (i *Inspector) ListCompletedTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
+	if err := base.ValidateQueueName(qname); err != nil {
+		return nil, fmt.Errorf("asynq: %v", err)
+	}
+	opt := composeListOptions(opts...)
+	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
+	infos, err := i.rdb.ListCompleted(qname, pgn)
+	switch {
+	case errors.IsQueueNotFound(err):
+		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
+	case err != nil:
+		return nil, fmt.Errorf("asynq: %v", err)
+	}
+	var tasks []*TaskInfo
+	for _, i := range infos {
+		tasks = append(tasks, newTaskInfo(
+			i.Message,
+			i.State,
+			i.NextProcessAt,
+			i.Result,
+		))
+	}
+	return tasks, nil
+}
@@ -421,6 +465,16 @@ func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
	return int(n), err
}

+// DeleteAllCompletedTasks deletes all completed tasks from the specified queue,
+// and reports the number of tasks deleted.
+func (i *Inspector) DeleteAllCompletedTasks(qname string) (int, error) {
+	if err := base.ValidateQueueName(qname); err != nil {
+		return 0, err
+	}
+	n, err := i.rdb.DeleteAllCompletedTasks(qname)
+	return int(n), err
+}
+
// DeleteTask deletes a task with the given id from the given queue.
// The task needs to be in pending, scheduled, retry, or archived state,
// otherwise DeleteTask will return an error.
@@ -790,6 +844,12 @@ func parseOption(s string) (Option, error) {
			return nil, err
		}
		return ProcessIn(d), nil
+	case "Retention":
+		d, err := time.ParseDuration(arg)
+		if err != nil {
+			return nil, err
+		}
+		return Retention(d), nil
	default:
		return nil, fmt.Errorf("cannot not parse option string %q", s)
	}
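Putting the new inspector surface together, a rough usage sketch (the queue name and Redis address are placeholders; PageSize is assumed from the existing ListOption API that composeListOptions consumes):

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})

	// Completed tasks are returned in order of expiration (CompletedAt + Retention).
	tasks, err := insp.ListCompletedTasks("default", asynq.PageSize(10))
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tasks {
		log.Printf("id=%s completed_at=%v retention=%v result=%q",
			t.ID, t.CompletedAt, t.Retention, t.Result)
	}

	// Drop everything currently held in the queue's completed set.
	n, err := insp.DeleteAllCompletedTasks("default")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleted %d completed tasks", n)
}
```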


@ -276,6 +276,7 @@ func TestInspectorGetQueueInfo(t *testing.T) {
scheduled map[string][]base.Z scheduled map[string][]base.Z
retry map[string][]base.Z retry map[string][]base.Z
archived map[string][]base.Z archived map[string][]base.Z
completed map[string][]base.Z
processed map[string]int processed map[string]int
failed map[string]int failed map[string]int
qname string qname string
@ -310,6 +311,11 @@ func TestInspectorGetQueueInfo(t *testing.T) {
"critical": {}, "critical": {},
"low": {}, "low": {},
}, },
completed: map[string][]base.Z{
"default": {},
"critical": {},
"low": {},
},
processed: map[string]int{ processed: map[string]int{
"default": 120, "default": 120,
"critical": 100, "critical": 100,
@ -329,6 +335,7 @@ func TestInspectorGetQueueInfo(t *testing.T) {
Scheduled: 2, Scheduled: 2,
Retry: 0, Retry: 0,
Archived: 0, Archived: 0,
Completed: 0,
Processed: 120, Processed: 120,
Failed: 2, Failed: 2,
Paused: false, Paused: false,
@ -344,6 +351,7 @@ func TestInspectorGetQueueInfo(t *testing.T) {
h.SeedAllScheduledQueues(t, r, tc.scheduled) h.SeedAllScheduledQueues(t, r, tc.scheduled)
h.SeedAllRetryQueues(t, r, tc.retry) h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived) h.SeedAllArchivedQueues(t, r, tc.archived)
h.SeedAllCompletedQueues(t, r, tc.completed)
for qname, n := range tc.processed { for qname, n := range tc.processed {
processedKey := base.ProcessedKey(qname, now) processedKey := base.ProcessedKey(qname, now)
r.Set(context.Background(), processedKey, n, 0) r.Set(context.Background(), processedKey, n, 0)
@@ -424,7 +432,7 @@ func TestInspectorHistory(t *testing.T) {
	}
}

func createPendingTask(msg *base.TaskMessage) *TaskInfo {
-	return newTaskInfo(msg, base.TaskStatePending, time.Now())
+	return newTaskInfo(msg, base.TaskStatePending, time.Now(), nil)
}

func TestInspectorGetTaskInfo(t *testing.T) {
@ -489,6 +497,7 @@ func TestInspectorGetTaskInfo(t *testing.T) {
m1, m1,
base.TaskStateActive, base.TaskStateActive,
time.Time{}, // zero value for n/a time.Time{}, // zero value for n/a
nil,
), ),
}, },
{ {
@ -498,6 +507,7 @@ func TestInspectorGetTaskInfo(t *testing.T) {
m2, m2,
base.TaskStateScheduled, base.TaskStateScheduled,
fiveMinsFromNow, fiveMinsFromNow,
nil,
), ),
}, },
{ {
@ -507,6 +517,7 @@ func TestInspectorGetTaskInfo(t *testing.T) {
m3, m3,
base.TaskStateRetry, base.TaskStateRetry,
oneHourFromNow, oneHourFromNow,
nil,
), ),
}, },
{ {
@ -516,6 +527,7 @@ func TestInspectorGetTaskInfo(t *testing.T) {
m4, m4,
base.TaskStateArchived, base.TaskStateArchived,
time.Time{}, // zero value for n/a time.Time{}, // zero value for n/a
nil,
), ),
}, },
{ {
@ -525,6 +537,7 @@ func TestInspectorGetTaskInfo(t *testing.T) {
m5, m5,
base.TaskStatePending, base.TaskStatePending,
now, now,
nil,
), ),
}, },
} }
@@ -722,8 +735,8 @@ func TestInspectorListActiveTasks(t *testing.T) {
			},
			qname: "default",
			want: []*TaskInfo{
-				newTaskInfo(m1, base.TaskStateActive, time.Time{}),
-				newTaskInfo(m2, base.TaskStateActive, time.Time{}),
+				newTaskInfo(m1, base.TaskStateActive, time.Time{}, nil),
+				newTaskInfo(m2, base.TaskStateActive, time.Time{}, nil),
			},
		},
	}
@ -749,6 +762,7 @@ func createScheduledTask(z base.Z) *TaskInfo {
z.Message, z.Message,
base.TaskStateScheduled, base.TaskStateScheduled,
time.Unix(z.Score, 0), time.Unix(z.Score, 0),
nil,
) )
} }
@ -818,6 +832,7 @@ func createRetryTask(z base.Z) *TaskInfo {
z.Message, z.Message,
base.TaskStateRetry, base.TaskStateRetry,
time.Unix(z.Score, 0), time.Unix(z.Score, 0),
nil,
) )
} }
@ -888,6 +903,7 @@ func createArchivedTask(z base.Z) *TaskInfo {
z.Message, z.Message,
base.TaskStateArchived, base.TaskStateArchived,
time.Time{}, // zero value for n/a time.Time{}, // zero value for n/a
nil,
) )
} }
@ -952,6 +968,83 @@ func TestInspectorListArchivedTasks(t *testing.T) {
} }
} }
func newCompletedTaskMessage(typename, qname string, retention time.Duration, completedAt time.Time) *base.TaskMessage {
msg := h.NewTaskMessageWithQueue(typename, nil, qname)
msg.Retention = int64(retention.Seconds())
msg.CompletedAt = completedAt.Unix()
return msg
}
func createCompletedTask(z base.Z) *TaskInfo {
return newTaskInfo(
z.Message,
base.TaskStateCompleted,
time.Time{}, // zero value for n/a
nil, // TODO: Test with result data
)
}
func TestInspectorListCompletedTasks(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
m1 := newCompletedTaskMessage("task1", "default", 1*time.Hour, now.Add(-3*time.Minute)) // Expires in 57 mins
m2 := newCompletedTaskMessage("task2", "default", 30*time.Minute, now.Add(-10*time.Minute)) // Expires in 20 mins
m3 := newCompletedTaskMessage("task3", "default", 2*time.Hour, now.Add(-30*time.Minute)) // Expires in 90 mins
m4 := newCompletedTaskMessage("task4", "custom", 15*time.Minute, now.Add(-2*time.Minute)) // Expires in 13 mins
z1 := base.Z{Message: m1, Score: m1.CompletedAt + m1.Retention}
z2 := base.Z{Message: m2, Score: m2.CompletedAt + m2.Retention}
z3 := base.Z{Message: m3, Score: m3.CompletedAt + m3.Retention}
z4 := base.Z{Message: m4, Score: m4.CompletedAt + m4.Retention}
inspector := NewInspector(getRedisConnOpt(t))
tests := []struct {
desc string
completed map[string][]base.Z
qname string
want []*TaskInfo
}{
{
desc: "with a few completed tasks",
completed: map[string][]base.Z{
"default": {z1, z2, z3},
"custom": {z4},
},
qname: "default",
// Should be sorted by expiration time (CompletedAt + Retention).
want: []*TaskInfo{
createCompletedTask(z2),
createCompletedTask(z1),
createCompletedTask(z3),
},
},
{
desc: "with empty completed queue",
completed: map[string][]base.Z{
"default": {},
},
qname: "default",
want: []*TaskInfo(nil),
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)
got, err := inspector.ListCompletedTasks(tc.qname)
if err != nil {
t.Errorf("%s; ListCompletedTasks(%q) returned error: %v", tc.desc, tc.qname, err)
continue
}
if diff := cmp.Diff(tc.want, got, cmp.AllowUnexported(TaskInfo{})); diff != "" {
t.Errorf("%s; ListCompletedTasks(%q) = %v, want %v; (-want,+got)\n%s",
tc.desc, tc.qname, got, tc.want, diff)
}
}
}
func TestInspectorListPagination(t *testing.T) { func TestInspectorListPagination(t *testing.T) {
// Create 100 tasks. // Create 100 tasks.
var msgs []*base.TaskMessage var msgs []*base.TaskMessage
@ -1050,6 +1143,9 @@ func TestInspectorListTasksQueueNotFoundError(t *testing.T) {
if _, err := inspector.ListArchivedTasks(tc.qname); !errors.Is(err, tc.wantErr) { if _, err := inspector.ListArchivedTasks(tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListArchivedTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr) t.Errorf("ListArchivedTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
} }
if _, err := inspector.ListCompletedTasks(tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListCompletedTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
}
} }
} }
@ -1315,6 +1411,72 @@ func TestInspectorDeleteAllArchivedTasks(t *testing.T) {
} }
} }
func TestInspectorDeleteAllCompletedTasks(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
m1 := newCompletedTaskMessage("task1", "default", 30*time.Minute, now.Add(-2*time.Minute))
m2 := newCompletedTaskMessage("task2", "default", 30*time.Minute, now.Add(-5*time.Minute))
m3 := newCompletedTaskMessage("task3", "default", 30*time.Minute, now.Add(-10*time.Minute))
m4 := newCompletedTaskMessage("task4", "custom", 30*time.Minute, now.Add(-3*time.Minute))
z1 := base.Z{Message: m1, Score: m1.CompletedAt + m1.Retention}
z2 := base.Z{Message: m2, Score: m2.CompletedAt + m2.Retention}
z3 := base.Z{Message: m3, Score: m3.CompletedAt + m3.Retention}
z4 := base.Z{Message: m4, Score: m4.CompletedAt + m4.Retention}
inspector := NewInspector(getRedisConnOpt(t))
tests := []struct {
completed map[string][]base.Z
qname string
want int
wantCompleted map[string][]base.Z
}{
{
completed: map[string][]base.Z{
"default": {z1, z2, z3},
"custom": {z4},
},
qname: "default",
want: 3,
wantCompleted: map[string][]base.Z{
"default": {},
"custom": {z4},
},
},
{
completed: map[string][]base.Z{
"default": {},
},
qname: "default",
want: 0,
wantCompleted: map[string][]base.Z{
"default": {},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)
got, err := inspector.DeleteAllCompletedTasks(tc.qname)
if err != nil {
t.Errorf("DeleteAllCompletedTasks(%q) returned error: %v", tc.qname, err)
continue
}
if got != tc.want {
t.Errorf("DeleteAllCompletedTasks(%q) = %d, want %d", tc.qname, got, tc.want)
}
for qname, want := range tc.wantCompleted {
gotCompleted := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, gotCompleted, h.SortZSetEntryOpt); diff != "" {
t.Errorf("unexpected completed tasks in queue %q: (-want, +got)\n%s", qname, diff)
}
}
}
}
func TestInspectorArchiveAllPendingTasks(t *testing.T) { func TestInspectorArchiveAllPendingTasks(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close() defer r.Close()
@@ -3034,6 +3196,7 @@ func TestParseOption(t *testing.T) {
		{`Unique(1h)`, UniqueOpt, 1 * time.Hour},
		{ProcessAt(oneHourFromNow).String(), ProcessAtOpt, oneHourFromNow},
		{`ProcessIn(10m)`, ProcessInOpt, 10 * time.Minute},
+		{`Retention(24h)`, RetentionOpt, 24 * time.Hour},
	}

	for _, tc := range tests {
@@ -3065,7 +3228,7 @@ func TestParseOption(t *testing.T) {
		if gotVal != tc.wantVal.(int) {
			t.Fatalf("got value %v, want %v", gotVal, tc.wantVal)
		}
-	case TimeoutOpt, UniqueOpt, ProcessInOpt:
+	case TimeoutOpt, UniqueOpt, ProcessInOpt, RetentionOpt:
		gotVal, ok := got.Value().(time.Duration)
		if !ok {
			t.Fatal("returned Option with non duration value")


@ -139,6 +139,12 @@ func TaskMessageWithError(t base.TaskMessage, errMsg string, failedAt time.Time)
return &t return &t
} }
// TaskMessageWithCompletedAt returns an updated copy of t after completion.
func TaskMessageWithCompletedAt(t base.TaskMessage, completedAt time.Time) *base.TaskMessage {
t.CompletedAt = completedAt.Unix()
return &t
}
// MustMarshal marshals given task message and returns a json string. // MustMarshal marshals given task message and returns a json string.
// Calling test will fail if marshaling errors out. // Calling test will fail if marshaling errors out.
func MustMarshal(tb testing.TB, msg *base.TaskMessage) string { func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
@ -224,6 +230,13 @@ func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qna
seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries, base.TaskStateActive) seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries, base.TaskStateActive)
} }
// SeedCompletedQueue initializes the completed set with the given entries.
func SeedCompletedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.CompletedKey(qname), entries, base.TaskStateCompleted)
}
// SeedAllPendingQueues initializes all of the specified queues with the given messages. // SeedAllPendingQueues initializes all of the specified queues with the given messages.
// //
// pending maps a queue name to a list of messages. // pending maps a queue name to a list of messages.
@ -274,6 +287,14 @@ func SeedAllDeadlines(tb testing.TB, r redis.UniversalClient, deadlines map[stri
} }
} }
// SeedAllCompletedQueues initializes all of the completed queues with the given entries.
func SeedAllCompletedQueues(tb testing.TB, r redis.UniversalClient, completed map[string][]base.Z) {
tb.Helper()
for q, entries := range completed {
SeedCompletedQueue(tb, r, entries, q)
}
}
func seedRedisList(tb testing.TB, c redis.UniversalClient, key string, func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
msgs []*base.TaskMessage, state base.TaskState) { msgs []*base.TaskMessage, state base.TaskState) {
tb.Helper() tb.Helper()
@ -367,6 +388,13 @@ func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) [
return getMessagesFromZSet(tb, r, qname, base.ArchivedKey, base.TaskStateArchived) return getMessagesFromZSet(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
} }
// GetCompletedMessages returns all completed task messages in the given queue.
// It also asserts the state field of the task.
func GetCompletedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
tb.Helper()
return getMessagesFromZSet(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
}
// GetScheduledEntries returns all scheduled messages and its score in the given queue. // GetScheduledEntries returns all scheduled messages and its score in the given queue.
// It also asserts the state field of the task. // It also asserts the state field of the task.
func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z { func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
@ -395,6 +423,13 @@ func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) [
return getMessagesFromZSetWithScores(tb, r, qname, base.DeadlinesKey, base.TaskStateActive) return getMessagesFromZSetWithScores(tb, r, qname, base.DeadlinesKey, base.TaskStateActive)
} }
// GetCompletedEntries returns all completed messages and their scores in the given queue.
// It also asserts the state field of the task.
func GetCompletedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
tb.Helper()
return getMessagesFromZSetWithScores(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
}
// Retrieves all messages stored under `keyFn(qname)` key in redis list. // Retrieves all messages stored under `keyFn(qname)` key in redis list.
func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string, func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage { keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {

View File

@ -48,6 +48,7 @@ const (
TaskStateScheduled TaskStateScheduled
TaskStateRetry TaskStateRetry
TaskStateArchived TaskStateArchived
TaskStateCompleted
) )
func (s TaskState) String() string { func (s TaskState) String() string {
@ -62,6 +63,8 @@ func (s TaskState) String() string {
return "retry" return "retry"
case TaskStateArchived: case TaskStateArchived:
return "archived" return "archived"
case TaskStateCompleted:
return "completed"
} }
panic(fmt.Sprintf("internal error: unknown task state %d", s)) panic(fmt.Sprintf("internal error: unknown task state %d", s))
} }
@ -78,6 +81,8 @@ func TaskStateFromString(s string) (TaskState, error) {
return TaskStateRetry, nil return TaskStateRetry, nil
case "archived": case "archived":
return TaskStateArchived, nil return TaskStateArchived, nil
case "completed":
return TaskStateCompleted, nil
} }
return 0, errors.E(errors.FailedPrecondition, fmt.Sprintf("%q is not supported task state", s)) return 0, errors.E(errors.FailedPrecondition, fmt.Sprintf("%q is not supported task state", s))
} }
@ -136,6 +141,10 @@ func DeadlinesKey(qname string) string {
return fmt.Sprintf("%sdeadlines", QueueKeyPrefix(qname)) return fmt.Sprintf("%sdeadlines", QueueKeyPrefix(qname))
} }
func CompletedKey(qname string) string {
return fmt.Sprintf("%scompleted", QueueKeyPrefix(qname))
}
// PausedKey returns a redis key to indicate that the given queue is paused. // PausedKey returns a redis key to indicate that the given queue is paused.
func PausedKey(qname string) string { func PausedKey(qname string) string {
return fmt.Sprintf("%spaused", QueueKeyPrefix(qname)) return fmt.Sprintf("%spaused", QueueKeyPrefix(qname))
@ -229,6 +238,15 @@ type TaskMessage struct {
// //
// Empty string indicates that no uniqueness lock was used. // Empty string indicates that no uniqueness lock was used.
UniqueKey string UniqueKey string
// Retention specifies the number of seconds the task should be retained after completion.
Retention int64
// CompletedAt is the time the task was processed successfully in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
//
// Use zero to indicate no value.
CompletedAt int64
} }
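Both new fields are stored as plain Unix seconds. A rough sketch, not taken from this commit, of the intended bookkeeping when a handler finishes a task that was enqueued with a retention duration (the function and variable names here are illustrative):
// applyRetention is a hypothetical helper; the real wiring lives elsewhere in the library.
func applyRetention(msg *TaskMessage, retention time.Duration, completedAt time.Time) {
	msg.Retention = int64(retention.Seconds()) // e.g. 24h -> 86400
	if msg.Retention > 0 {
		msg.CompletedAt = completedAt.Unix() // recorded only on successful completion
	}
	// The completed entry is then kept until CompletedAt + Retention.
}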
// EncodeMessage marshals the given task message and returns an encoded bytes. // EncodeMessage marshals the given task message and returns an encoded bytes.
@ -248,6 +266,8 @@ func EncodeMessage(msg *TaskMessage) ([]byte, error) {
Timeout: msg.Timeout, Timeout: msg.Timeout,
Deadline: msg.Deadline, Deadline: msg.Deadline,
UniqueKey: msg.UniqueKey, UniqueKey: msg.UniqueKey,
Retention: msg.Retention,
CompletedAt: msg.CompletedAt,
}) })
} }
@ -269,6 +289,8 @@ func DecodeMessage(data []byte) (*TaskMessage, error) {
Timeout: pbmsg.GetTimeout(), Timeout: pbmsg.GetTimeout(),
Deadline: pbmsg.GetDeadline(), Deadline: pbmsg.GetDeadline(),
UniqueKey: pbmsg.GetUniqueKey(), UniqueKey: pbmsg.GetUniqueKey(),
Retention: pbmsg.GetRetention(),
CompletedAt: pbmsg.GetCompletedAt(),
}, nil }, nil
} }
@ -277,6 +299,7 @@ type TaskInfo struct {
Message *TaskMessage Message *TaskMessage
State TaskState State TaskState
NextProcessAt time.Time NextProcessAt time.Time
Result []byte
} }
// Z represents sorted set member. // Z represents sorted set member.
@ -641,16 +664,19 @@ type Broker interface {
EnqueueUnique(msg *TaskMessage, ttl time.Duration) error EnqueueUnique(msg *TaskMessage, ttl time.Duration) error
Dequeue(qnames ...string) (*TaskMessage, time.Time, error) Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
Done(msg *TaskMessage) error Done(msg *TaskMessage) error
MarkAsComplete(msg *TaskMessage) error
Requeue(msg *TaskMessage) error Requeue(msg *TaskMessage) error
Schedule(msg *TaskMessage, processAt time.Time) error Schedule(msg *TaskMessage, processAt time.Time) error
ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
Retry(msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error Retry(msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
Archive(msg *TaskMessage, errMsg string) error Archive(msg *TaskMessage, errMsg string) error
ForwardIfReady(qnames ...string) error ForwardIfReady(qnames ...string) error
DeleteExpiredCompletedTasks(qname string) error
ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
ClearServerState(host string, pid int, serverID string) error ClearServerState(host string, pid int, serverID string) error
CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
PublishCancelation(id string) error PublishCancelation(id string) error
WriteResult(qname, id string, data []byte) (n int, err error)
Close() error Close() error
} }
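The Broker interface gains three result-related methods: MarkAsComplete moves a finished task into the completed set, DeleteExpiredCompletedTasks purges completed entries whose retention TTL has elapsed, and WriteResult stores result bytes for a task. A simplified, illustrative sketch of how a processing loop could choose between Done and MarkAsComplete (not the actual processor code):
// finishTask is a hypothetical helper, shown only to clarify the intended split.
func finishTask(b Broker, msg *TaskMessage) error {
	if msg.Retention > 0 {
		// Keep the task in the completed set until the retention TTL expires.
		return b.MarkAsComplete(msg)
	}
	// No retention requested: remove the task as before.
	return b.Done(msg)
}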

View File

@ -139,6 +139,23 @@ func TestArchivedKey(t *testing.T) {
} }
} }
func TestCompletedKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:completed"},
{"custom", "asynq:{custom}:completed"},
}
for _, tc := range tests {
got := CompletedKey(tc.qname)
if got != tc.want {
t.Errorf("CompletedKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestPausedKey(t *testing.T) { func TestPausedKey(t *testing.T) {
tests := []struct { tests := []struct {
qname string qname string
@ -359,6 +376,7 @@ func TestMessageEncoding(t *testing.T) {
Retried: 0, Retried: 0,
Timeout: 1800, Timeout: 1800,
Deadline: 1692311100, Deadline: 1692311100,
Retention: 3600,
}, },
out: &TaskMessage{ out: &TaskMessage{
Type: "task1", Type: "task1",
@ -369,6 +387,7 @@ func TestMessageEncoding(t *testing.T) {
Retried: 0, Retried: 0,
Timeout: 1800, Timeout: 1800,
Deadline: 1692311100, Deadline: 1692311100,
Retention: 3600,
}, },
}, },
} }

View File

@ -30,7 +30,7 @@ const metadataCtxKey ctxKey = 0
// New returns a context and cancel function for a given task message. // New returns a context and cancel function for a given task message.
func New(msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) { func New(msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
metadata := taskMetadata{ metadata := taskMetadata{
id: msg.ID.String(), id: msg.ID,
maxRetry: msg.Retry, maxRetry: msg.Retry,
retryCount: msg.Retried, retryCount: msg.Retried,
qname: msg.Queue, qname: msg.Queue,

View File

@ -29,7 +29,6 @@ func TestCreateContextWithFutureDeadline(t *testing.T) {
} }
ctx, cancel := New(msg, tc.deadline) ctx, cancel := New(msg, tc.deadline)
select { select {
case x := <-ctx.Done(): case x := <-ctx.Done():
t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x) t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)

View File

@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // versions:
// protoc-gen-go v1.25.0 // protoc-gen-go v1.25.0
// protoc v3.14.0 // protoc v3.17.3
// source: asynq.proto // source: asynq.proto
package proto package proto
@ -65,6 +65,14 @@ type TaskMessage struct {
// UniqueKey holds the redis key used for uniqueness lock for this task. // UniqueKey holds the redis key used for uniqueness lock for this task.
// Empty string indicates that no uniqueness lock was used. // Empty string indicates that no uniqueness lock was used.
UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"` UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"`
// Retention period specified as a number of seconds.
// The task will be stored in redis as a completed task until the TTL
// expires.
Retention int64 `protobuf:"varint,12,opt,name=retention,proto3" json:"retention,omitempty"`
// Time when the task completed successfully, in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// This field is populated if result_ttl > 0 upon completion.
CompletedAt int64 `protobuf:"varint,13,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"`
} }
func (x *TaskMessage) Reset() { func (x *TaskMessage) Reset() {
@ -176,6 +184,20 @@ func (x *TaskMessage) GetUniqueKey() string {
return "" return ""
} }
func (x *TaskMessage) GetRetention() int64 {
if x != nil {
return x.Retention
}
return 0
}
func (x *TaskMessage) GetCompletedAt() int64 {
if x != nil {
return x.CompletedAt
}
return 0
}
// ServerInfo holds information about a running server. // ServerInfo holds information about a running server.
type ServerInfo struct { type ServerInfo struct {
state protoimpl.MessageState state protoimpl.MessageState
@ -592,7 +614,7 @@ var file_asynq_proto_rawDesc = []byte{
0x0a, 0x0b, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x61, 0x0a, 0x0b, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x61,
0x73, 0x79, 0x6e, 0x71, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x73, 0x79, 0x6e, 0x71, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa9, 0x02, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xea, 0x02, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79,
0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c,
@ -611,80 +633,84 @@ var file_asynq_proto_rawDesc = []byte{
0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69,
0x6e, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x6e, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79,
0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65,
0x79, 0x22, 0x8f, 0x03, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0c,
0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18,
0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64,
0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x41, 0x74, 0x22, 0x8f, 0x03, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66,
0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x35, 0x0a, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x18, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65,
0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76,
0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x45, 0x65, 0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x35, 0x0a, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73,
0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x50, 0x72, 0x69, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x53,
0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73,
0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x12, 0x27, 0x0a,
0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x50, 0x72,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39,
0x76, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x57, 0x6f, 0x72,
0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x75,
0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e,
0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20,
0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76,
0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72,
0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1b,
0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74,
0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x14,
0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71,
0x75, 0x65, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69,
0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12,
0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64,
0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0e, 0x53, 0x63, 0x68, 0x65,
0x64, 0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x70,
0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x1b,
0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74,
0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x27,
0x0a, 0x0f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65,
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x11, 0x6e, 0x65, 0x78, 0x74, 0x5f,
0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
0x6e, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74,
0x46, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x69, 0x76, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x57, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x51, 0x75, 0x65,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x76, 0x45, 0x6e, 0x71, 0x75, 0x75, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x6f, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65, 0x64, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49,
0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x6e, 0x71, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72,
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x65, 0x6e, 0x71, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65,
0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x69, 0x62, 0x69, 0x6b, 0x65, 0x6e, 0x2f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12,
0x73, 0x79, 0x6e, 0x71, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01,
0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c,
0x74, 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12,
0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74,
0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65,
0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08,
0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0e, 0x53, 0x63, 0x68,
0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73,
0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12,
0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c,
0x74, 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12,
0x27, 0x0a, 0x0f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75,
0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x11, 0x6e, 0x65, 0x78, 0x74,
0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
0x0f, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65,
0x12, 0x46, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65,
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x76, 0x45, 0x6e, 0x71,
0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x6f, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65,
0x64, 0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x6e,
0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x65, 0x6e,
0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x69, 0x62, 0x69, 0x6b, 0x65, 0x6e, 0x2f,
0x61, 0x73, 0x79, 0x6e, 0x71, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
} }
var ( var (

View File

@ -51,6 +51,15 @@ message TaskMessage {
// Empty string indicates that no uniqueness lock was used. // Empty string indicates that no uniqueness lock was used.
string unique_key = 10; string unique_key = 10;
// Retention period specified as a number of seconds.
// The task will be stored in redis as a completed task until the TTL
// expires.
int64 retention = 12;
// Time when the task completed successfully, in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// This field is populated if result_ttl > 0 upon completion.
int64 completed_at = 13;
}; };
// ServerInfo holds information about a running server. // ServerInfo holds information about a running server.

View File

@ -40,6 +40,7 @@ type Stats struct {
Scheduled int Scheduled int
Retry int Retry int
Archived int Archived int
Completed int
// Total number of tasks processed during the current date. // Total number of tasks processed during the current date.
// The number includes both succeeded and failed tasks. // The number includes both succeeded and failed tasks.
Processed int Processed int
@ -67,9 +68,10 @@ type DailyStats struct {
// KEYS[3] -> asynq:<qname>:scheduled // KEYS[3] -> asynq:<qname>:scheduled
// KEYS[4] -> asynq:<qname>:retry // KEYS[4] -> asynq:<qname>:retry
// KEYS[5] -> asynq:<qname>:archived // KEYS[5] -> asynq:<qname>:archived
// KEYS[6] -> asynq:<qname>:processed:<yyyy-mm-dd> // KEYS[6] -> asynq:<qname>:completed
// KEYS[7] -> asynq:<qname>:failed:<yyyy-mm-dd> // KEYS[7] -> asynq:<qname>:processed:<yyyy-mm-dd>
// KEYS[8] -> asynq:<qname>:paused // KEYS[8] -> asynq:<qname>:failed:<yyyy-mm-dd>
// KEYS[9] -> asynq:<qname>:paused
var currentStatsCmd = redis.NewScript(` var currentStatsCmd = redis.NewScript(`
local res = {} local res = {}
table.insert(res, KEYS[1]) table.insert(res, KEYS[1])
@ -82,28 +84,30 @@ table.insert(res, KEYS[4])
table.insert(res, redis.call("ZCARD", KEYS[4])) table.insert(res, redis.call("ZCARD", KEYS[4]))
table.insert(res, KEYS[5]) table.insert(res, KEYS[5])
table.insert(res, redis.call("ZCARD", KEYS[5])) table.insert(res, redis.call("ZCARD", KEYS[5]))
table.insert(res, KEYS[6])
table.insert(res, redis.call("ZCARD", KEYS[6]))
local pcount = 0 local pcount = 0
local p = redis.call("GET", KEYS[6]) local p = redis.call("GET", KEYS[7])
if p then if p then
pcount = tonumber(p) pcount = tonumber(p)
end end
table.insert(res, KEYS[6]) table.insert(res, KEYS[7])
table.insert(res, pcount) table.insert(res, pcount)
local fcount = 0 local fcount = 0
local f = redis.call("GET", KEYS[7]) local f = redis.call("GET", KEYS[8])
if f then if f then
fcount = tonumber(f) fcount = tonumber(f)
end end
table.insert(res, KEYS[7])
table.insert(res, fcount)
table.insert(res, KEYS[8]) table.insert(res, KEYS[8])
table.insert(res, redis.call("EXISTS", KEYS[8])) table.insert(res, fcount)
table.insert(res, KEYS[9])
table.insert(res, redis.call("EXISTS", KEYS[9]))
return res`) return res`)
// CurrentStats returns a current state of the queues. // CurrentStats returns a current state of the queues.
func (r *RDB) CurrentStats(qname string) (*Stats, error) { func (r *RDB) CurrentStats(qname string) (*Stats, error) {
var op errors.Op = "rdb.CurrentStats" var op errors.Op = "rdb.CurrentStats"
exists, err := r.client.SIsMember(context.Background(), base.AllQueues, qname).Result() exists, err := r.queueExists(qname)
if err != nil { if err != nil {
return nil, errors.E(op, errors.Unknown, err) return nil, errors.E(op, errors.Unknown, err)
} }
@ -117,6 +121,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
base.ScheduledKey(qname), base.ScheduledKey(qname),
base.RetryKey(qname), base.RetryKey(qname),
base.ArchivedKey(qname), base.ArchivedKey(qname),
base.CompletedKey(qname),
base.ProcessedKey(qname, now), base.ProcessedKey(qname, now),
base.FailedKey(qname, now), base.FailedKey(qname, now),
base.PausedKey(qname), base.PausedKey(qname),
@ -152,6 +157,9 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
case base.ArchivedKey(qname): case base.ArchivedKey(qname):
stats.Archived = val stats.Archived = val
size += val size += val
case base.CompletedKey(qname):
stats.Completed = val
size += val
case base.ProcessedKey(qname, now): case base.ProcessedKey(qname, now):
stats.Processed = val stats.Processed = val
case base.FailedKey(qname, now): case base.FailedKey(qname, now):
@ -182,6 +190,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
// KEYS[3] -> asynq:{qname}:scheduled // KEYS[3] -> asynq:{qname}:scheduled
// KEYS[4] -> asynq:{qname}:retry // KEYS[4] -> asynq:{qname}:retry
// KEYS[5] -> asynq:{qname}:archived // KEYS[5] -> asynq:{qname}:archived
// KEYS[6] -> asynq:{qname}:completed
// //
// ARGV[1] -> asynq:{qname}:t: // ARGV[1] -> asynq:{qname}:t:
// ARGV[2] -> sample_size (e.g 20) // ARGV[2] -> sample_size (e.g 20)
@ -208,7 +217,7 @@ for i=1,2 do
memusg = memusg + m memusg = memusg + m
end end
end end
for i=3,5 do for i=3,6 do
local ids = redis.call("ZRANGE", KEYS[i], 0, sample_size - 1) local ids = redis.call("ZRANGE", KEYS[i], 0, sample_size - 1)
local sample_total = 0 local sample_total = 0
if (table.getn(ids) > 0) then if (table.getn(ids) > 0) then
@ -237,6 +246,7 @@ func (r *RDB) memoryUsage(qname string) (int64, error) {
base.ScheduledKey(qname), base.ScheduledKey(qname),
base.RetryKey(qname), base.RetryKey(qname),
base.ArchivedKey(qname), base.ArchivedKey(qname),
base.CompletedKey(qname),
} }
argv := []interface{}{ argv := []interface{}{
base.TaskKeyPrefix(qname), base.TaskKeyPrefix(qname),
@ -270,7 +280,7 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
if n < 1 { if n < 1 {
return nil, errors.E(op, errors.FailedPrecondition, "the number of days must be positive") return nil, errors.E(op, errors.FailedPrecondition, "the number of days must be positive")
} }
exists, err := r.client.SIsMember(context.Background(), base.AllQueues, qname).Result() exists, err := r.queueExists(qname)
if err != nil { if err != nil {
return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err}) return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
} }
@ -337,7 +347,8 @@ func parseInfo(infoStr string) (map[string]string, error) {
return info, nil return info, nil
} }
func reverse(x []string) { // TODO: Use generics once available.
func reverse(x []*base.TaskInfo) {
for i := len(x)/2 - 1; i >= 0; i-- { for i := len(x)/2 - 1; i >= 0; i-- {
opp := len(x) - 1 - i opp := len(x) - 1 - i
x[i], x[opp] = x[opp], x[i] x[i], x[opp] = x[opp], x[i]
@ -347,7 +358,7 @@ func reverse(x []string) {
// checkQueueExists verifies whether the queue exists. // checkQueueExists verifies whether the queue exists.
// It returns QueueNotFoundError if queue doesn't exist. // It returns QueueNotFoundError if queue doesn't exist.
func (r *RDB) checkQueueExists(qname string) error { func (r *RDB) checkQueueExists(qname string) error {
exists, err := r.client.SIsMember(context.Background(), base.AllQueues, qname).Result() exists, err := r.queueExists(qname)
if err != nil { if err != nil {
return errors.E(errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err}) return errors.E(errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
} }
@ -364,24 +375,25 @@ func (r *RDB) checkQueueExists(qname string) error {
// ARGV[3] -> queue key prefix (asynq:{<qname>}:) // ARGV[3] -> queue key prefix (asynq:{<qname>}:)
// //
// Output: // Output:
// Tuple of {msg, state, nextProcessAt} // Tuple of {msg, state, nextProcessAt, result}
// msg: encoded task message // msg: encoded task message
// state: string describing the state of the task // state: string describing the state of the task
// nextProcessAt: unix time in seconds, zero if not applicable. // nextProcessAt: unix time in seconds, zero if not applicable.
// result: result data associated with the task
// //
// If the task key doesn't exist, it returns error with a message "NOT FOUND" // If the task key doesn't exist, it returns error with a message "NOT FOUND"
var getTaskInfoCmd = redis.NewScript(` var getTaskInfoCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[1]) == 0 then if redis.call("EXISTS", KEYS[1]) == 0 then
return redis.error_reply("NOT FOUND") return redis.error_reply("NOT FOUND")
end end
local msg, state = unpack(redis.call("HMGET", KEYS[1], "msg", "state")) local msg, state, result = unpack(redis.call("HMGET", KEYS[1], "msg", "state", "result"))
if state == "scheduled" or state == "retry" then if state == "scheduled" or state == "retry" then
return {msg, state, redis.call("ZSCORE", ARGV[3] .. state, ARGV[1])} return {msg, state, redis.call("ZSCORE", ARGV[3] .. state, ARGV[1]), result}
end end
if state == "pending" then if state == "pending" then
return {msg, state, ARGV[2]} return {msg, state, ARGV[2], result}
end end
return {msg, state, 0} return {msg, state, 0, result}
`) `)
// GetTaskInfo returns a TaskInfo describing the task from the given queue. // GetTaskInfo returns a TaskInfo describing the task from the given queue.
@ -407,7 +419,7 @@ func (r *RDB) GetTaskInfo(qname, id string) (*base.TaskInfo, error) {
if err != nil { if err != nil {
return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script") return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
} }
if len(vals) != 3 { if len(vals) != 4 {
return nil, errors.E(op, errors.Internal, "unepxected number of values returned from Lua script") return nil, errors.E(op, errors.Internal, "unepxected number of values returned from Lua script")
} }
encoded, err := cast.ToStringE(vals[0]) encoded, err := cast.ToStringE(vals[0])
@ -422,6 +434,10 @@ func (r *RDB) GetTaskInfo(qname, id string) (*base.TaskInfo, error) {
if err != nil { if err != nil {
return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script") return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
} }
resultStr, err := cast.ToStringE(vals[3])
if err != nil {
return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
}
msg, err := base.DecodeMessage([]byte(encoded)) msg, err := base.DecodeMessage([]byte(encoded))
if err != nil { if err != nil {
return nil, errors.E(op, errors.Internal, "could not decode task message") return nil, errors.E(op, errors.Internal, "could not decode task message")
@ -434,10 +450,15 @@ func (r *RDB) GetTaskInfo(qname, id string) (*base.TaskInfo, error) {
if processAtUnix != 0 { if processAtUnix != 0 {
nextProcessAt = time.Unix(processAtUnix, 0) nextProcessAt = time.Unix(processAtUnix, 0)
} }
var result []byte
if len(resultStr) > 0 {
result = []byte(resultStr)
}
return &base.TaskInfo{ return &base.TaskInfo{
Message: msg, Message: msg,
State: state, State: state,
NextProcessAt: nextProcessAt, NextProcessAt: nextProcessAt,
Result: result,
}, nil }, nil
} }
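A hedged usage sketch of reading back a completed task's result through GetTaskInfo; the queue name and task ID are placeholders, and the fragment assumes the usual imports in the surrounding file:
info, err := r.GetTaskInfo("default", id) // id is the task ID string
if err != nil {
	return err // e.g. TaskNotFoundError or QueueNotFoundError
}
if info.State == base.TaskStateCompleted {
	// Result holds whatever the handler wrote via ResultWriter; nil if nothing was written.
	fmt.Printf("task %s completed at %d with result %q\n", id, info.Message.CompletedAt, info.Result)
}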
@ -460,12 +481,16 @@ func (p Pagination) stop() int64 {
} }
// ListPending returns pending tasks that are ready to be processed. // ListPending returns pending tasks that are ready to be processed.
func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, error) { func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
var op errors.Op = "rdb.ListPending" var op errors.Op = "rdb.ListPending"
if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() { exists, err := r.queueExists(qname)
if err != nil {
return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
}
if !exists {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname}) return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
} }
res, err := r.listMessages(base.PendingKey(qname), qname, pgn) res, err := r.listMessages(qname, base.TaskStatePending, pgn)
if err != nil { if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err) return nil, errors.E(op, errors.CanonicalCode(err), err)
} }
@ -473,12 +498,16 @@ func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, er
} }
// ListActive returns all tasks that are currently being processed for the given queue. // ListActive returns all tasks that are currently being processed for the given queue.
func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskMessage, error) { func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
var op errors.Op = "rdb.ListActive" var op errors.Op = "rdb.ListActive"
if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() { exists, err := r.queueExists(qname)
if err != nil {
return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
}
if !exists {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname}) return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
} }
res, err := r.listMessages(base.ActiveKey(qname), qname, pgn) res, err := r.listMessages(qname, base.TaskStateActive, pgn)
if err != nil { if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err) return nil, errors.E(op, errors.CanonicalCode(err), err)
} }
@ -491,16 +520,27 @@ func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskMessage, err
// ARGV[3] -> task key prefix // ARGV[3] -> task key prefix
var listMessagesCmd = redis.NewScript(` var listMessagesCmd = redis.NewScript(`
local ids = redis.call("LRange", KEYS[1], ARGV[1], ARGV[2]) local ids = redis.call("LRange", KEYS[1], ARGV[1], ARGV[2])
local res = {} local data = {}
for _, id in ipairs(ids) do for _, id in ipairs(ids) do
local key = ARGV[3] .. id local key = ARGV[3] .. id
table.insert(res, redis.call("HGET", key, "msg")) local msg, result = unpack(redis.call("HMGET", key, "msg","result"))
table.insert(data, msg)
table.insert(data, result)
end end
return res return data
`) `)
// listMessages returns a list of TaskMessage in Redis list with the given key. // listMessages returns a list of TaskInfo for tasks in the Redis list for the given queue and state.
func (r *RDB) listMessages(key, qname string, pgn Pagination) ([]*base.TaskMessage, error) { func (r *RDB) listMessages(qname string, state base.TaskState, pgn Pagination) ([]*base.TaskInfo, error) {
var key string
switch state {
case base.TaskStateActive:
key = base.ActiveKey(qname)
case base.TaskStatePending:
key = base.PendingKey(qname)
default:
panic(fmt.Sprintf("unsupported task state: %v", state))
}
// Note: Because we use LPUSH to redis list, we need to calculate the // Note: Because we use LPUSH to redis list, we need to calculate the
// correct range and reverse the list to get the tasks with pagination. // correct range and reverse the list to get the tasks with pagination.
stop := -pgn.start() - 1 stop := -pgn.start() - 1
@ -514,27 +554,44 @@ func (r *RDB) listMessages(key, qname string, pgn Pagination) ([]*base.TaskMessa
if err != nil { if err != nil {
return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res)) return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
} }
reverse(data) var infos []*base.TaskInfo
var msgs []*base.TaskMessage for i := 0; i < len(data); i += 2 {
for _, s := range data { m, err := base.DecodeMessage([]byte(data[i]))
m, err := base.DecodeMessage([]byte(s))
if err != nil { if err != nil {
continue // bad data, ignore and continue continue // bad data, ignore and continue
} }
msgs = append(msgs, m) var res []byte
if len(data[i+1]) > 0 {
res = []byte(data[i+1])
} }
return msgs, nil var nextProcessAt time.Time
if state == base.TaskStatePending {
nextProcessAt = time.Now()
}
infos = append(infos, &base.TaskInfo{
Message: m,
State: state,
NextProcessAt: nextProcessAt,
Result: res,
})
}
reverse(infos)
return infos, nil
} }
// ListScheduled returns all tasks from the given queue that are scheduled // ListScheduled returns all tasks from the given queue that are scheduled
// to be processed in the future. // to be processed in the future.
func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]base.Z, error) { func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
var op errors.Op = "rdb.ListScheduled" var op errors.Op = "rdb.ListScheduled"
if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() { exists, err := r.queueExists(qname)
if err != nil {
return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
}
if !exists {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname}) return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
} }
res, err := r.listZSetEntries(base.ScheduledKey(qname), qname, pgn) res, err := r.listZSetEntries(qname, base.TaskStateScheduled, pgn)
if err != nil { if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err) return nil, errors.E(op, errors.CanonicalCode(err), err)
} }
@ -543,12 +600,16 @@ func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]base.Z, error) {
// ListRetry returns all tasks from the given queue that have failed before // ListRetry returns all tasks from the given queue that have failed before
// and will be retried in the future. // and will be retried in the future.
func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) { func (r *RDB) ListRetry(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
var op errors.Op = "rdb.ListRetry" var op errors.Op = "rdb.ListRetry"
if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() { exists, err := r.queueExists(qname)
if err != nil {
return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
}
if !exists {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname}) return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
} }
res, err := r.listZSetEntries(base.RetryKey(qname), qname, pgn) res, err := r.listZSetEntries(qname, base.TaskStateRetry, pgn)
if err != nil { if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err) return nil, errors.E(op, errors.CanonicalCode(err), err)
} }
@ -556,39 +617,82 @@ func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) {
} }
// ListArchived returns all tasks from the given queue that have exhausted its retry limit. // ListArchived returns all tasks from the given queue that have exhausted its retry limit.
func (r *RDB) ListArchived(qname string, pgn Pagination) ([]base.Z, error) { func (r *RDB) ListArchived(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
var op errors.Op = "rdb.ListArchived" var op errors.Op = "rdb.ListArchived"
if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() { exists, err := r.queueExists(qname)
if err != nil {
return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
}
if !exists {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname}) return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
} }
zs, err := r.listZSetEntries(base.ArchivedKey(qname), qname, pgn) zs, err := r.listZSetEntries(qname, base.TaskStateArchived, pgn)
if err != nil { if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err) return nil, errors.E(op, errors.CanonicalCode(err), err)
} }
return zs, nil return zs, nil
} }
// ListCompleted returns all tasks from the given queue that have completed successfully.
func (r *RDB) ListCompleted(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
var op errors.Op = "rdb.ListCompleted"
exists, err := r.queueExists(qname)
if err != nil {
return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
}
if !exists {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
}
zs, err := r.listZSetEntries(qname, base.TaskStateCompleted, pgn)
if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err)
}
return zs, nil
}
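An illustrative call, assuming the package's Pagination type with Size and Page fields, listing the first page of completed tasks:
// First page of up to 20 completed tasks in the "default" queue.
infos, err := r.ListCompleted("default", Pagination{Size: 20, Page: 0})
if err != nil {
	return err
}
for _, info := range infos {
	// info.State is TaskStateCompleted; info.Result carries any handler-written data.
	_ = info
}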
// queueExists reports whether a queue with the given name exists.
func (r *RDB) queueExists(qname string) (bool, error) {
return r.client.SIsMember(context.Background(), base.AllQueues, qname).Result()
}
// KEYS[1] -> key for ids set (e.g. asynq:{<qname>}:scheduled) // KEYS[1] -> key for ids set (e.g. asynq:{<qname>}:scheduled)
// ARGV[1] -> min // ARGV[1] -> min
// ARGV[2] -> max // ARGV[2] -> max
// ARGV[3] -> task key prefix // ARGV[3] -> task key prefix
// //
// Returns an array populated with // Returns an array populated with
// [msg1, score1, msg2, score2, ..., msgN, scoreN] // [msg1, score1, result1, msg2, score2, result2, ..., msgN, scoreN, resultN]
var listZSetEntriesCmd = redis.NewScript(` var listZSetEntriesCmd = redis.NewScript(`
local res = {} local data = {}
local id_score_pairs = redis.call("ZRANGE", KEYS[1], ARGV[1], ARGV[2], "WITHSCORES") local id_score_pairs = redis.call("ZRANGE", KEYS[1], ARGV[1], ARGV[2], "WITHSCORES")
for i = 1, table.getn(id_score_pairs), 2 do for i = 1, table.getn(id_score_pairs), 2 do
local key = ARGV[3] .. id_score_pairs[i] local id = id_score_pairs[i]
table.insert(res, redis.call("HGET", key, "msg")) local score = id_score_pairs[i+1]
table.insert(res, id_score_pairs[i+1]) local key = ARGV[3] .. id
local msg, res = unpack(redis.call("HMGET", key, "msg", "result"))
table.insert(data, msg)
table.insert(data, score)
table.insert(data, res)
end end
return res return data
`) `)
// listZSetEntries returns a list of message and score pairs in Redis sorted-set // listZSetEntries returns a list of message and score pairs in Redis sorted-set
// with the given key. // with the given key.
func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, error) { func (r *RDB) listZSetEntries(qname string, state base.TaskState, pgn Pagination) ([]*base.TaskInfo, error) {
var key string
switch state {
case base.TaskStateScheduled:
key = base.ScheduledKey(qname)
case base.TaskStateRetry:
key = base.RetryKey(qname)
case base.TaskStateArchived:
key = base.ArchivedKey(qname)
case base.TaskStateCompleted:
key = base.CompletedKey(qname)
default:
panic(fmt.Sprintf("unsupported task state: %v", state))
}
res, err := listZSetEntriesCmd.Run(context.Background(), r.client, []string{key}, res, err := listZSetEntriesCmd.Run(context.Background(), r.client, []string{key},
pgn.start(), pgn.stop(), base.TaskKeyPrefix(qname)).Result() pgn.start(), pgn.stop(), base.TaskKeyPrefix(qname)).Result()
if err != nil { if err != nil {
@ -598,8 +702,8 @@ func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, erro
if err != nil { if err != nil {
return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res)) return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
} }
var zs []base.Z var infos []*base.TaskInfo
for i := 0; i < len(data); i += 2 { for i := 0; i < len(data); i += 3 {
s, err := cast.ToStringE(data[i]) s, err := cast.ToStringE(data[i])
if err != nil { if err != nil {
return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res)) return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
@ -608,13 +712,30 @@ func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, erro
if err != nil { if err != nil {
return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res)) return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
} }
resStr, err := cast.ToStringE(data[i+2])
if err != nil {
return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
}
msg, err := base.DecodeMessage([]byte(s)) msg, err := base.DecodeMessage([]byte(s))
if err != nil { if err != nil {
continue // bad data, ignore and continue continue // bad data, ignore and continue
} }
zs = append(zs, base.Z{Message: msg, Score: score}) var nextProcessAt time.Time
if state == base.TaskStateScheduled || state == base.TaskStateRetry {
nextProcessAt = time.Unix(score, 0)
} }
return zs, nil var resBytes []byte
if len(resStr) > 0 {
resBytes = []byte(resStr)
}
infos = append(infos, &base.TaskInfo{
Message: msg,
State: state,
NextProcessAt: nextProcessAt,
Result: resBytes,
})
}
return infos, nil
} }
// RunAllScheduledTasks enqueues all scheduled tasks from the given queue // RunAllScheduledTasks enqueues all scheduled tasks from the given queue
@ -1132,6 +1253,20 @@ func (r *RDB) DeleteAllScheduledTasks(qname string) (int64, error) {
return n, nil return n, nil
} }
// DeleteAllCompletedTasks deletes all completed tasks from the given queue
// and returns the number of tasks deleted.
func (r *RDB) DeleteAllCompletedTasks(qname string) (int64, error) {
var op errors.Op = "rdb.DeleteAllCompletedTasks"
n, err := r.deleteAll(base.CompletedKey(qname), qname)
if errors.IsQueueNotFound(err) {
return 0, errors.E(op, errors.NotFound, err)
}
if err != nil {
return 0, errors.E(op, errors.Unknown, err)
}
return n, nil
}
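And the matching bulk delete, sketched with a placeholder queue name:
n, err := r.DeleteAllCompletedTasks("default")
if err != nil {
	return err
}
fmt.Printf("deleted %d completed tasks\n", n) // assumes fmt is imported in the surrounding file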
// deleteAllCmd deletes tasks from the given zset. // deleteAllCmd deletes tasks from the given zset.
// //
// Input: // Input:
@ -1334,7 +1469,7 @@ return 1`)
// the queue is empty. // the queue is empty.
func (r *RDB) RemoveQueue(qname string, force bool) error { func (r *RDB) RemoveQueue(qname string, force bool) error {
var op errors.Op = "rdb.RemoveQueue" var op errors.Op = "rdb.RemoveQueue"
exists, err := r.client.SIsMember(context.Background(), base.AllQueues, qname).Result() exists, err := r.queueExists(qname)
if err != nil { if err != nil {
return err return err
} }

View File

@ -67,6 +67,7 @@ func TestCurrentStats(t *testing.T) {
scheduled map[string][]base.Z scheduled map[string][]base.Z
retry map[string][]base.Z retry map[string][]base.Z
archived map[string][]base.Z archived map[string][]base.Z
completed map[string][]base.Z
processed map[string]int processed map[string]int
failed map[string]int failed map[string]int
paused []string paused []string
@ -102,6 +103,11 @@ func TestCurrentStats(t *testing.T) {
"critical": {}, "critical": {},
"low": {}, "low": {},
}, },
completed: map[string][]base.Z{
"default": {},
"critical": {},
"low": {},
},
processed: map[string]int{ processed: map[string]int{
"default": 120, "default": 120,
"critical": 100, "critical": 100,
@ -123,6 +129,7 @@ func TestCurrentStats(t *testing.T) {
Scheduled: 2, Scheduled: 2,
Retry: 0, Retry: 0,
Archived: 0, Archived: 0,
Completed: 0,
Processed: 120, Processed: 120,
Failed: 2, Failed: 2,
Timestamp: now, Timestamp: now,
@ -157,6 +164,11 @@ func TestCurrentStats(t *testing.T) {
"critical": {}, "critical": {},
"low": {}, "low": {},
}, },
completed: map[string][]base.Z{
"default": {},
"critical": {},
"low": {},
},
processed: map[string]int{ processed: map[string]int{
"default": 120, "default": 120,
"critical": 100, "critical": 100,
@ -178,6 +190,7 @@ func TestCurrentStats(t *testing.T) {
Scheduled: 0, Scheduled: 0,
Retry: 0, Retry: 0,
Archived: 0, Archived: 0,
Completed: 0,
Processed: 100, Processed: 100,
Failed: 0, Failed: 0,
Timestamp: now, Timestamp: now,
@ -197,6 +210,7 @@ func TestCurrentStats(t *testing.T) {
h.SeedAllScheduledQueues(t, r.client, tc.scheduled) h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
h.SeedAllRetryQueues(t, r.client, tc.retry) h.SeedAllRetryQueues(t, r.client, tc.retry)
h.SeedAllArchivedQueues(t, r.client, tc.archived) h.SeedAllArchivedQueues(t, r.client, tc.archived)
h.SeedAllCompletedQueues(t, r.client, tc.completed)
for qname, n := range tc.processed { for qname, n := range tc.processed {
processedKey := base.ProcessedKey(qname, now) processedKey := base.ProcessedKey(qname, now)
r.client.Set(context.Background(), processedKey, n, 0) r.client.Set(context.Background(), processedKey, n, 0)
@ -315,16 +329,19 @@ func TestGetTaskInfo(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close() defer r.Close()
now := time.Now()
fiveMinsFromNow := now.Add(5 * time.Minute)
oneHourFromNow := now.Add(1 * time.Hour)
twoHoursAgo := now.Add(-2 * time.Hour)
m1 := h.NewTaskMessageWithQueue("task1", nil, "default") m1 := h.NewTaskMessageWithQueue("task1", nil, "default")
m2 := h.NewTaskMessageWithQueue("task2", nil, "default") m2 := h.NewTaskMessageWithQueue("task2", nil, "default")
m3 := h.NewTaskMessageWithQueue("task3", nil, "custom") m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
m4 := h.NewTaskMessageWithQueue("task4", nil, "custom") m4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
m5 := h.NewTaskMessageWithQueue("task5", nil, "custom") m5 := h.NewTaskMessageWithQueue("task5", nil, "custom")
m6 := h.NewTaskMessageWithQueue("task5", nil, "custom")
now := time.Now() m6.CompletedAt = twoHoursAgo.Unix()
fiveMinsFromNow := now.Add(5 * time.Minute) m6.Retention = int64((24 * time.Hour).Seconds())
oneHourFromNow := now.Add(1 * time.Hour)
twoHoursAgo := now.Add(-2 * time.Hour)
fixtures := struct { fixtures := struct {
active map[string][]*base.TaskMessage active map[string][]*base.TaskMessage
@ -332,6 +349,7 @@ func TestGetTaskInfo(t *testing.T) {
scheduled map[string][]base.Z scheduled map[string][]base.Z
retry map[string][]base.Z retry map[string][]base.Z
archived map[string][]base.Z archived map[string][]base.Z
completed map[string][]base.Z
}{ }{
active: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {m1}, "default": {m1},
@ -353,6 +371,10 @@ func TestGetTaskInfo(t *testing.T) {
"default": {}, "default": {},
"custom": {{Message: m4, Score: twoHoursAgo.Unix()}}, "custom": {{Message: m4, Score: twoHoursAgo.Unix()}},
}, },
completed: map[string][]base.Z{
"default": {},
"custom": {{Message: m6, Score: m6.CompletedAt + m6.Retention}},
},
} }
h.SeedAllActiveQueues(t, r.client, fixtures.active) h.SeedAllActiveQueues(t, r.client, fixtures.active)
@ -360,6 +382,11 @@ func TestGetTaskInfo(t *testing.T) {
h.SeedAllScheduledQueues(t, r.client, fixtures.scheduled) h.SeedAllScheduledQueues(t, r.client, fixtures.scheduled)
h.SeedAllRetryQueues(t, r.client, fixtures.retry) h.SeedAllRetryQueues(t, r.client, fixtures.retry)
h.SeedAllArchivedQueues(t, r.client, fixtures.archived) h.SeedAllArchivedQueues(t, r.client, fixtures.archived)
h.SeedAllCompletedQueues(t, r.client, fixtures.completed)
// Write result data for the completed task.
if err := r.client.HSet(context.Background(), base.TaskKey(m6.Queue, m6.ID), "result", "foobar").Err(); err != nil {
t.Fatalf("Failed to write result data under task key: %v", err)
}
tests := []struct { tests := []struct {
qname string qname string
@ -373,6 +400,7 @@ func TestGetTaskInfo(t *testing.T) {
Message: m1, Message: m1,
State: base.TaskStateActive, State: base.TaskStateActive,
NextProcessAt: time.Time{}, // zero value for N/A NextProcessAt: time.Time{}, // zero value for N/A
Result: nil,
}, },
}, },
{ {
@ -382,6 +410,7 @@ func TestGetTaskInfo(t *testing.T) {
Message: m2, Message: m2,
State: base.TaskStateScheduled, State: base.TaskStateScheduled,
NextProcessAt: fiveMinsFromNow, NextProcessAt: fiveMinsFromNow,
Result: nil,
}, },
}, },
{ {
@ -391,6 +420,7 @@ func TestGetTaskInfo(t *testing.T) {
Message: m3, Message: m3,
State: base.TaskStateRetry, State: base.TaskStateRetry,
NextProcessAt: oneHourFromNow, NextProcessAt: oneHourFromNow,
Result: nil,
}, },
}, },
{ {
@ -400,6 +430,7 @@ func TestGetTaskInfo(t *testing.T) {
Message: m4, Message: m4,
State: base.TaskStateArchived, State: base.TaskStateArchived,
NextProcessAt: time.Time{}, // zero value for N/A NextProcessAt: time.Time{}, // zero value for N/A
Result: nil,
}, },
}, },
{ {
@ -409,6 +440,17 @@ func TestGetTaskInfo(t *testing.T) {
Message: m5, Message: m5,
State: base.TaskStatePending, State: base.TaskStatePending,
NextProcessAt: now, NextProcessAt: now,
Result: nil,
},
},
{
qname: "custom",
id: m6.ID,
want: &base.TaskInfo{
Message: m6,
State: base.TaskStateCompleted,
NextProcessAt: time.Time{}, // zero value for N/A
Result: []byte("foobar"),
}, },
}, },
} }
@ -516,21 +558,24 @@ func TestListPending(t *testing.T) {
tests := []struct { tests := []struct {
pending map[string][]*base.TaskMessage pending map[string][]*base.TaskMessage
qname string qname string
want []*base.TaskMessage want []*base.TaskInfo
}{ }{
{ {
pending: map[string][]*base.TaskMessage{ pending: map[string][]*base.TaskMessage{
base.DefaultQueueName: {m1, m2}, base.DefaultQueueName: {m1, m2},
}, },
qname: base.DefaultQueueName, qname: base.DefaultQueueName,
want: []*base.TaskMessage{m1, m2}, want: []*base.TaskInfo{
{Message: m1, State: base.TaskStatePending, NextProcessAt: time.Now(), Result: nil},
{Message: m2, State: base.TaskStatePending, NextProcessAt: time.Now(), Result: nil},
},
}, },
{ {
pending: map[string][]*base.TaskMessage{ pending: map[string][]*base.TaskMessage{
base.DefaultQueueName: nil, base.DefaultQueueName: nil,
}, },
qname: base.DefaultQueueName, qname: base.DefaultQueueName,
want: []*base.TaskMessage(nil), want: []*base.TaskInfo(nil),
}, },
{ {
pending: map[string][]*base.TaskMessage{ pending: map[string][]*base.TaskMessage{
@ -539,7 +584,10 @@ func TestListPending(t *testing.T) {
"low": {m4}, "low": {m4},
}, },
qname: base.DefaultQueueName, qname: base.DefaultQueueName,
want: []*base.TaskMessage{m1, m2}, want: []*base.TaskInfo{
{Message: m1, State: base.TaskStatePending, NextProcessAt: time.Now(), Result: nil},
{Message: m2, State: base.TaskStatePending, NextProcessAt: time.Now(), Result: nil},
},
}, },
{ {
pending: map[string][]*base.TaskMessage{ pending: map[string][]*base.TaskMessage{
@ -548,7 +596,9 @@ func TestListPending(t *testing.T) {
"low": {m4}, "low": {m4},
}, },
qname: "critical", qname: "critical",
want: []*base.TaskMessage{m3}, want: []*base.TaskInfo{
{Message: m3, State: base.TaskStatePending, NextProcessAt: time.Now(), Result: nil},
},
}, },
} }
@ -562,7 +612,7 @@ func TestListPending(t *testing.T) {
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want) t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want)
continue continue
} }
if diff := cmp.Diff(tc.want, got); diff != "" { if diff := cmp.Diff(tc.want, got, cmpopts.EquateApproxTime(2*time.Second)); diff != "" {
t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff) t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff)
continue continue
} }
@ -622,13 +672,13 @@ func TestListPendingPagination(t *testing.T) {
continue continue
} }
first := got[0] first := got[0].Message
if first.Type != tc.wantFirst { if first.Type != tc.wantFirst {
t.Errorf("%s; %s returned a list with first message %q, want %q", t.Errorf("%s; %s returned a list with first message %q, want %q",
tc.desc, op, first.Type, tc.wantFirst) tc.desc, op, first.Type, tc.wantFirst)
} }
last := got[len(got)-1] last := got[len(got)-1].Message
if last.Type != tc.wantLast { if last.Type != tc.wantLast {
t.Errorf("%s; %s returned a list with the last message %q, want %q", t.Errorf("%s; %s returned a list with the last message %q, want %q",
tc.desc, op, last.Type, tc.wantLast) tc.desc, op, last.Type, tc.wantLast)
@ -648,7 +698,7 @@ func TestListActive(t *testing.T) {
tests := []struct { tests := []struct {
inProgress map[string][]*base.TaskMessage inProgress map[string][]*base.TaskMessage
qname string qname string
want []*base.TaskMessage want []*base.TaskInfo
}{ }{
{ {
inProgress: map[string][]*base.TaskMessage{ inProgress: map[string][]*base.TaskMessage{
@ -657,14 +707,17 @@ func TestListActive(t *testing.T) {
"low": {m4}, "low": {m4},
}, },
qname: "default", qname: "default",
want: []*base.TaskMessage{m1, m2}, want: []*base.TaskInfo{
{Message: m1, State: base.TaskStateActive, NextProcessAt: time.Time{}, Result: nil},
{Message: m2, State: base.TaskStateActive, NextProcessAt: time.Time{}, Result: nil},
},
}, },
{ {
inProgress: map[string][]*base.TaskMessage{ inProgress: map[string][]*base.TaskMessage{
"default": {}, "default": {},
}, },
qname: "default", qname: "default",
want: []*base.TaskMessage(nil), want: []*base.TaskInfo(nil),
}, },
} }
@ -678,7 +731,7 @@ func TestListActive(t *testing.T) {
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.inProgress) t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.inProgress)
continue continue
} }
if diff := cmp.Diff(tc.want, got); diff != "" { if diff := cmp.Diff(tc.want, got, cmpopts.EquateApproxTime(1*time.Second)); diff != "" {
t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff) t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff)
continue continue
} }
@ -728,13 +781,13 @@ func TestListActivePagination(t *testing.T) {
continue continue
} }
first := got[0] first := got[0].Message
if first.Type != tc.wantFirst { if first.Type != tc.wantFirst {
t.Errorf("%s; %s returned a list with first message %q, want %q", t.Errorf("%s; %s returned a list with first message %q, want %q",
tc.desc, op, first.Type, tc.wantFirst) tc.desc, op, first.Type, tc.wantFirst)
} }
last := got[len(got)-1] last := got[len(got)-1].Message
if last.Type != tc.wantLast { if last.Type != tc.wantLast {
t.Errorf("%s; %s returned a list with the last message %q, want %q", t.Errorf("%s; %s returned a list with the last message %q, want %q",
tc.desc, op, last.Type, tc.wantLast) tc.desc, op, last.Type, tc.wantLast)
@ -757,7 +810,7 @@ func TestListScheduled(t *testing.T) {
tests := []struct { tests := []struct {
scheduled map[string][]base.Z scheduled map[string][]base.Z
qname string qname string
want []base.Z want []*base.TaskInfo
}{ }{
{ {
scheduled: map[string][]base.Z{ scheduled: map[string][]base.Z{
@ -772,10 +825,10 @@ func TestListScheduled(t *testing.T) {
}, },
qname: "default", qname: "default",
// should be sorted by score in ascending order // should be sorted by score in ascending order
want: []base.Z{ want: []*base.TaskInfo{
{Message: m3, Score: p3.Unix()}, {Message: m3, NextProcessAt: p3, State: base.TaskStateScheduled, Result: nil},
{Message: m1, Score: p1.Unix()}, {Message: m1, NextProcessAt: p1, State: base.TaskStateScheduled, Result: nil},
{Message: m2, Score: p2.Unix()}, {Message: m2, NextProcessAt: p2, State: base.TaskStateScheduled, Result: nil},
}, },
}, },
{ {
@ -790,8 +843,8 @@ func TestListScheduled(t *testing.T) {
}, },
}, },
qname: "custom", qname: "custom",
want: []base.Z{ want: []*base.TaskInfo{
{Message: m4, Score: p4.Unix()}, {Message: m4, NextProcessAt: p4, State: base.TaskStateScheduled, Result: nil},
}, },
}, },
{ {
@ -799,7 +852,7 @@ func TestListScheduled(t *testing.T) {
"default": {}, "default": {},
}, },
qname: "default", qname: "default",
want: []base.Z(nil), want: []*base.TaskInfo(nil),
}, },
} }
@ -813,7 +866,7 @@ func TestListScheduled(t *testing.T) {
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want) t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want)
continue continue
} }
if diff := cmp.Diff(tc.want, got, zScoreCmpOpt); diff != "" { if diff := cmp.Diff(tc.want, got, cmpopts.EquateApproxTime(1*time.Second)); diff != "" {
t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff) t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff)
continue continue
} }
@ -915,7 +968,7 @@ func TestListRetry(t *testing.T) {
tests := []struct { tests := []struct {
retry map[string][]base.Z retry map[string][]base.Z
qname string qname string
want []base.Z want []*base.TaskInfo
}{ }{
{ {
retry: map[string][]base.Z{ retry: map[string][]base.Z{
@ -928,9 +981,9 @@ func TestListRetry(t *testing.T) {
}, },
}, },
qname: "default", qname: "default",
want: []base.Z{ want: []*base.TaskInfo{
{Message: m1, Score: p1.Unix()}, {Message: m1, NextProcessAt: p1, State: base.TaskStateRetry, Result: nil},
{Message: m2, Score: p2.Unix()}, {Message: m2, NextProcessAt: p2, State: base.TaskStateRetry, Result: nil},
}, },
}, },
{ {
@ -944,8 +997,8 @@ func TestListRetry(t *testing.T) {
}, },
}, },
qname: "custom", qname: "custom",
want: []base.Z{ want: []*base.TaskInfo{
{Message: m3, Score: p3.Unix()}, {Message: m3, NextProcessAt: p3, State: base.TaskStateRetry, Result: nil},
}, },
}, },
{ {
@ -953,7 +1006,7 @@ func TestListRetry(t *testing.T) {
"default": {}, "default": {},
}, },
qname: "default", qname: "default",
want: []base.Z(nil), want: []*base.TaskInfo(nil),
}, },
} }
@ -967,7 +1020,7 @@ func TestListRetry(t *testing.T) {
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want) t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want)
continue continue
} }
if diff := cmp.Diff(tc.want, got, zScoreCmpOpt); diff != "" { if diff := cmp.Diff(tc.want, got, cmpopts.EquateApproxTime(1*time.Second)); diff != "" {
t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s",
op, got, err, tc.want, diff) op, got, err, tc.want, diff)
continue continue
@ -1068,7 +1121,7 @@ func TestListArchived(t *testing.T) {
tests := []struct { tests := []struct {
archived map[string][]base.Z archived map[string][]base.Z
qname string qname string
want []base.Z want []*base.TaskInfo
}{ }{
{ {
archived: map[string][]base.Z{ archived: map[string][]base.Z{
@ -1081,9 +1134,9 @@ func TestListArchived(t *testing.T) {
}, },
}, },
qname: "default", qname: "default",
want: []base.Z{ want: []*base.TaskInfo{
{Message: m2, Score: f2.Unix()}, // FIXME: shouldn't be sorted in the other order? {Message: m2, NextProcessAt: time.Time{}, State: base.TaskStateArchived, Result: nil}, // FIXME: shouldn't be sorted in the other order?
{Message: m1, Score: f1.Unix()}, {Message: m1, NextProcessAt: time.Time{}, State: base.TaskStateArchived, Result: nil},
}, },
}, },
{ {
@ -1097,8 +1150,8 @@ func TestListArchived(t *testing.T) {
}, },
}, },
qname: "custom", qname: "custom",
want: []base.Z{ want: []*base.TaskInfo{
{Message: m3, Score: f3.Unix()}, {Message: m3, NextProcessAt: time.Time{}, State: base.TaskStateArchived, Result: nil},
}, },
}, },
{ {
@ -1106,7 +1159,7 @@ func TestListArchived(t *testing.T) {
"default": {}, "default": {},
}, },
qname: "default", qname: "default",
want: []base.Z(nil), want: []*base.TaskInfo(nil),
}, },
} }
@ -1115,12 +1168,12 @@ func TestListArchived(t *testing.T) {
h.SeedAllArchivedQueues(t, r.client, tc.archived) h.SeedAllArchivedQueues(t, r.client, tc.archived)
got, err := r.ListArchived(tc.qname, Pagination{Size: 20, Page: 0}) got, err := r.ListArchived(tc.qname, Pagination{Size: 20, Page: 0})
op := fmt.Sprintf("r.ListDead(%q, Pagination{Size: 20, Page: 0})", tc.qname) op := fmt.Sprintf("r.ListArchived(%q, Pagination{Size: 20, Page: 0})", tc.qname)
if err != nil { if err != nil {
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want) t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want)
continue continue
} }
if diff := cmp.Diff(tc.want, got, zScoreCmpOpt); diff != "" { if diff := cmp.Diff(tc.want, got); diff != "" {
t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s",
op, got, err, tc.want, diff) op, got, err, tc.want, diff)
continue continue
@ -1156,7 +1209,148 @@ func TestListArchivedPagination(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
got, err := r.ListArchived(tc.qname, Pagination{Size: tc.size, Page: tc.page}) got, err := r.ListArchived(tc.qname, Pagination{Size: tc.size, Page: tc.page})
op := fmt.Sprintf("r.ListDead(Pagination{Size: %d, Page: %d})", op := fmt.Sprintf("r.ListArchived(Pagination{Size: %d, Page: %d})",
tc.size, tc.page)
if err != nil {
t.Errorf("%s; %s returned error %v", tc.desc, op, err)
continue
}
if len(got) != tc.wantSize {
t.Errorf("%s; %s returned list of size %d, want %d",
tc.desc, op, len(got), tc.wantSize)
continue
}
if tc.wantSize == 0 {
continue
}
first := got[0].Message
if first.Type != tc.wantFirst {
t.Errorf("%s; %s returned a list with first message %q, want %q",
tc.desc, op, first.Type, tc.wantFirst)
}
last := got[len(got)-1].Message
if last.Type != tc.wantLast {
t.Errorf("%s; %s returned a list with the last message %q, want %q",
tc.desc, op, last.Type, tc.wantLast)
}
}
}
func TestListCompleted(t *testing.T) {
r := setup(t)
defer r.Close()
msg1 := &base.TaskMessage{
ID: uuid.NewString(),
Type: "foo",
Queue: "default",
CompletedAt: time.Now().Add(-2 * time.Hour).Unix(),
}
msg2 := &base.TaskMessage{
ID: uuid.NewString(),
Type: "foo",
Queue: "default",
CompletedAt: time.Now().Add(-5 * time.Hour).Unix(),
}
msg3 := &base.TaskMessage{
ID: uuid.NewString(),
Type: "foo",
Queue: "custom",
CompletedAt: time.Now().Add(-5 * time.Hour).Unix(),
}
expireAt1 := time.Now().Add(3 * time.Hour)
expireAt2 := time.Now().Add(4 * time.Hour)
expireAt3 := time.Now().Add(5 * time.Hour)
tests := []struct {
completed map[string][]base.Z
qname string
want []*base.TaskInfo
}{
{
completed: map[string][]base.Z{
"default": {
{Message: msg1, Score: expireAt1.Unix()},
{Message: msg2, Score: expireAt2.Unix()},
},
"custom": {
{Message: msg3, Score: expireAt3.Unix()},
},
},
qname: "default",
want: []*base.TaskInfo{
{Message: msg1, NextProcessAt: time.Time{}, State: base.TaskStateCompleted, Result: nil},
{Message: msg2, NextProcessAt: time.Time{}, State: base.TaskStateCompleted, Result: nil},
},
},
{
completed: map[string][]base.Z{
"default": {
{Message: msg1, Score: expireAt1.Unix()},
{Message: msg2, Score: expireAt2.Unix()},
},
"custom": {
{Message: msg3, Score: expireAt3.Unix()},
},
},
qname: "custom",
want: []*base.TaskInfo{
{Message: msg3, NextProcessAt: time.Time{}, State: base.TaskStateCompleted, Result: nil},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r.client) // clean up db before each test case
h.SeedAllCompletedQueues(t, r.client, tc.completed)
got, err := r.ListCompleted(tc.qname, Pagination{Size: 20, Page: 0})
op := fmt.Sprintf("r.ListCompleted(%q, Pagination{Size: 20, Page: 0})", tc.qname)
if err != nil {
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want)
continue
}
if diff := cmp.Diff(tc.want, got); diff != "" {
t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s",
op, got, err, tc.want, diff)
continue
}
}
}
func TestListCompletedPagination(t *testing.T) {
r := setup(t)
defer r.Close()
var entries []base.Z
for i := 0; i < 100; i++ {
msg := h.NewTaskMessage(fmt.Sprintf("task %d", i), nil)
entries = append(entries, base.Z{Message: msg, Score: int64(i)})
}
h.SeedCompletedQueue(t, r.client, entries, "default")
tests := []struct {
desc string
qname string
page int
size int
wantSize int
wantFirst string
wantLast string
}{
{"first page", "default", 0, 20, 20, "task 0", "task 19"},
{"second page", "default", 1, 20, 20, "task 20", "task 39"},
{"different page size", "default", 2, 30, 30, "task 60", "task 89"},
{"last page", "default", 3, 30, 10, "task 90", "task 99"},
{"out of range", "default", 4, 30, 0, "", ""},
}
for _, tc := range tests {
got, err := r.ListCompleted(tc.qname, Pagination{Size: tc.size, Page: tc.page})
op := fmt.Sprintf("r.ListCompleted(Pagination{Size: %d, Page: %d})",
tc.size, tc.page) tc.size, tc.page)
if err != nil { if err != nil {
t.Errorf("%s; %s returned error %v", tc.desc, op, err) t.Errorf("%s; %s returned error %v", tc.desc, op, err)
@ -1917,13 +2111,13 @@ func TestRunAllArchivedTasks(t *testing.T) {
got, err := r.RunAllArchivedTasks(tc.qname) got, err := r.RunAllArchivedTasks(tc.qname)
if err != nil { if err != nil {
t.Errorf("%s; r.RunAllDeadTasks(%q) = %v, %v; want %v, nil", t.Errorf("%s; r.RunAllArchivedTasks(%q) = %v, %v; want %v, nil",
tc.desc, tc.qname, got, err, tc.want) tc.desc, tc.qname, got, err, tc.want)
continue continue
} }
if got != tc.want { if got != tc.want {
t.Errorf("%s; r.RunAllDeadTasks(%q) = %v, %v; want %v, nil", t.Errorf("%s; r.RunAllArchivedTasks(%q) = %v, %v; want %v, nil",
tc.desc, tc.qname, got, err, tc.want) tc.desc, tc.qname, got, err, tc.want)
} }
@ -3322,10 +3516,10 @@ func TestDeleteAllArchivedTasks(t *testing.T) {
got, err := r.DeleteAllArchivedTasks(tc.qname) got, err := r.DeleteAllArchivedTasks(tc.qname)
if err != nil { if err != nil {
t.Errorf("r.DeleteAllDeadTasks(%q) returned error: %v", tc.qname, err) t.Errorf("r.DeleteAllArchivedTasks(%q) returned error: %v", tc.qname, err)
} }
if got != tc.want { if got != tc.want {
t.Errorf("r.DeleteAllDeadTasks(%q) = %d, nil, want %d, nil", tc.qname, got, tc.want) t.Errorf("r.DeleteAllArchivedTasks(%q) = %d, nil, want %d, nil", tc.qname, got, tc.want)
} }
for qname, want := range tc.wantArchived { for qname, want := range tc.wantArchived {
gotArchived := h.GetArchivedMessages(t, r.client, qname) gotArchived := h.GetArchivedMessages(t, r.client, qname)
@ -3336,6 +3530,76 @@ func TestDeleteAllArchivedTasks(t *testing.T) {
} }
} }
func newCompletedTaskMessage(qname, typename string, retention time.Duration, completedAt time.Time) *base.TaskMessage {
msg := h.NewTaskMessageWithQueue(typename, nil, qname)
msg.Retention = int64(retention.Seconds())
msg.CompletedAt = completedAt.Unix()
return msg
}
func TestDeleteAllCompletedTasks(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
m1 := newCompletedTaskMessage("default", "task1", 30*time.Minute, now.Add(-2*time.Minute))
m2 := newCompletedTaskMessage("default", "task2", 30*time.Minute, now.Add(-5*time.Minute))
m3 := newCompletedTaskMessage("custom", "task3", 30*time.Minute, now.Add(-5*time.Minute))
tests := []struct {
completed map[string][]base.Z
qname string
want int64
wantCompleted map[string][]*base.TaskMessage
}{
{
completed: map[string][]base.Z{
"default": {
{Message: m1, Score: m1.CompletedAt + m1.Retention},
{Message: m2, Score: m2.CompletedAt + m2.Retention},
},
"custom": {
{Message: m3, Score: m3.CompletedAt + m3.Retention},
},
},
qname: "default",
want: 2,
wantCompleted: map[string][]*base.TaskMessage{
"default": {},
"custom": {m3},
},
},
{
completed: map[string][]base.Z{
"default": {},
},
qname: "default",
want: 0,
wantCompleted: map[string][]*base.TaskMessage{
"default": {},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r.client) // clean up db before each test case
h.SeedAllCompletedQueues(t, r.client, tc.completed)
got, err := r.DeleteAllCompletedTasks(tc.qname)
if err != nil {
t.Errorf("r.DeleteAllCompletedTasks(%q) returned error: %v", tc.qname, err)
}
if got != tc.want {
t.Errorf("r.DeleteAllCompletedTasks(%q) = %d, nil, want %d, nil", tc.qname, got, tc.want)
}
for qname, want := range tc.wantCompleted {
gotCompleted := h.GetCompletedMessages(t, r.client, qname)
if diff := cmp.Diff(want, gotCompleted, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.CompletedKey(qname), diff)
}
}
}
}
func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) { func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close() defer r.Close()
@ -3389,10 +3653,10 @@ func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) {
got, err := r.DeleteAllArchivedTasks(tc.qname) got, err := r.DeleteAllArchivedTasks(tc.qname)
if err != nil { if err != nil {
t.Errorf("r.DeleteAllDeadTasks(%q) returned error: %v", tc.qname, err) t.Errorf("r.DeleteAllArchivedTasks(%q) returned error: %v", tc.qname, err)
} }
if got != tc.want { if got != tc.want {
t.Errorf("r.DeleteAllDeadTasks(%q) = %d, nil, want %d, nil", tc.qname, got, tc.want) t.Errorf("r.DeleteAllArchivedTasks(%q) = %d, nil, want %d, nil", tc.qname, got, tc.want)
} }
for qname, want := range tc.wantArchived { for qname, want := range tc.wantArchived {
gotArchived := h.GetArchivedMessages(t, r.client, qname) gotArchived := h.GetArchivedMessages(t, r.client, qname)

View File

@ -330,7 +330,7 @@ end
return redis.status_reply("OK") return redis.status_reply("OK")
`) `)
// Done removes the task from active queue to mark the task as done. // Done removes the task from active queue and deletes the task.
// It removes a uniqueness lock acquired by the task, if any. // It removes a uniqueness lock acquired by the task, if any.
func (r *RDB) Done(msg *base.TaskMessage) error { func (r *RDB) Done(msg *base.TaskMessage) error {
var op errors.Op = "rdb.Done" var op errors.Op = "rdb.Done"
@ -346,6 +346,7 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
msg.ID, msg.ID,
expireAt.Unix(), expireAt.Unix(),
} }
// Note: We cannot pass an empty unique key when running this script in redis-cluster.
if len(msg.UniqueKey) > 0 { if len(msg.UniqueKey) > 0 {
keys = append(keys, msg.UniqueKey) keys = append(keys, msg.UniqueKey)
return r.runScript(op, doneUniqueCmd, keys, argv...) return r.runScript(op, doneUniqueCmd, keys, argv...)
@ -353,6 +354,96 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
return r.runScript(op, doneCmd, keys, argv...) return r.runScript(op, doneCmd, keys, argv...)
} }
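
For orientation, a minimal sketch (illustrative package and function names, not part of this commit) of how a worker would choose between Done and the new MarkAsComplete below, based on the message's Retention value; it mirrors handleSucceededMessage in the processor changes further down:

package example

import (
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

// handleSucceeded routes a successfully processed message: tasks with a
// positive Retention are kept in the completed set, others are deleted.
func handleSucceeded(r *rdb.RDB, msg *base.TaskMessage) error {
	if msg.Retention > 0 {
		return r.MarkAsComplete(msg) // retained for msg.Retention seconds
	}
	return r.Done(msg) // task data is deleted right away
}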
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:completed
// KEYS[4] -> asynq:{<qname>}:t:<task_id>
// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// ARGV[1] -> task ID
// ARGV[2] -> stats expiration timestamp
// ARGV[3] -> task expiration time in unix time
// ARGV[4] -> task message data
var markAsCompleteCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
if redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1]) ~= 1 then
  return redis.error_reply("INTERNAL")
end
redis.call("HSET", KEYS[4], "msg", ARGV[4], "state", "completed")
local n = redis.call("INCR", KEYS[5])
if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[5], ARGV[2])
end
return redis.status_reply("OK")
`)
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:completed
// KEYS[4] -> asynq:{<qname>}:t:<task_id>
// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[6] -> asynq:{<qname>}:unique:{<checksum>}
// ARGV[1] -> task ID
// ARGV[2] -> stats expiration timestamp
// ARGV[3] -> task expiration time in unix time
// ARGV[4] -> task message data
var markAsCompleteUniqueCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
if redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1]) ~= 1 then
  return redis.error_reply("INTERNAL")
end
redis.call("HSET", KEYS[4], "msg", ARGV[4], "state", "completed")
local n = redis.call("INCR", KEYS[5])
if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[5], ARGV[2])
end
if redis.call("GET", KEYS[6]) == ARGV[1] then
redis.call("DEL", KEYS[6])
end
return redis.status_reply("OK")
`)
// MarkAsComplete removes the task from the active queue and marks it as completed.
// It removes a uniqueness lock acquired by the task, if any.
func (r *RDB) MarkAsComplete(msg *base.TaskMessage) error {
var op errors.Op = "rdb.MarkAsComplete"
now := time.Now()
statsExpireAt := now.Add(statsTTL)
msg.CompletedAt = now.Unix()
encoded, err := base.EncodeMessage(msg)
if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
}
keys := []string{
base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue),
base.CompletedKey(msg.Queue),
base.TaskKey(msg.Queue, msg.ID),
base.ProcessedKey(msg.Queue, now),
}
argv := []interface{}{
msg.ID,
statsExpireAt.Unix(),
now.Unix() + msg.Retention,
encoded,
}
// Note: We cannot pass an empty unique key when running this script in redis-cluster.
if len(msg.UniqueKey) > 0 {
keys = append(keys, msg.UniqueKey)
return r.runScript(op, markAsCompleteUniqueCmd, keys, argv...)
}
return r.runScript(op, markAsCompleteCmd, keys, argv...)
}
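
On the enqueue side, a task opts into this completed-set retention via the Retention option; a minimal sketch, where the Redis address and task type name are illustrative assumptions:

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Retention keeps the task (and any result data) in the completed set
	// for 24 hours after it succeeds, instead of deleting it immediately.
	task := asynq.NewTask("email:welcome", nil, asynq.Retention(24*time.Hour))
	if _, err := client.Enqueue(task); err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
}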
// KEYS[1] -> asynq:{<qname>}:active // KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines // KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:pending // KEYS[3] -> asynq:{<qname>}:pending
@ -703,6 +794,57 @@ func (r *RDB) forwardAll(qname string) (err error) {
return nil return nil
} }
// KEYS[1] -> asynq:{<qname>}:completed
// ARGV[1] -> current time in unix time
// ARGV[2] -> task key prefix
// ARGV[3] -> batch size (i.e. maximum number of tasks to delete)
//
// Returns the number of tasks deleted.
var deleteExpiredCompletedTasksCmd = redis.NewScript(`
local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, tonumber(ARGV[3]))
for _, id in ipairs(ids) do
redis.call("DEL", ARGV[2] .. id)
redis.call("ZREM", KEYS[1], id)
end
return table.getn(ids)`)
// DeleteExpiredCompletedTasks checks for any expired tasks in the given queue's completed set,
// and deletes all expired tasks.
func (r *RDB) DeleteExpiredCompletedTasks(qname string) error {
// Note: Do this operation in fixed-size batches to prevent a long-running script.
const batchSize = 100
for {
n, err := r.deleteExpiredCompletedTasks(qname, batchSize)
if err != nil {
return err
}
if n == 0 {
return nil
}
}
}
// deleteExpiredCompletedTasks runs the Lua script to delete expired completed tasks with the specified
// batch size. It reports the number of tasks deleted.
func (r *RDB) deleteExpiredCompletedTasks(qname string, batchSize int) (int64, error) {
var op errors.Op = "rdb.DeleteExpiredCompletedTasks"
keys := []string{base.CompletedKey(qname)}
argv := []interface{}{
time.Now().Unix(),
base.TaskKeyPrefix(qname),
batchSize,
}
res, err := deleteExpiredCompletedTasksCmd.Run(context.Background(), r.client, keys, argv...).Result()
if err != nil {
return 0, errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err))
}
n, ok := res.(int64)
if !ok {
return 0, errors.E(op, errors.Internal, fmt.Sprintf("unexpected return value from Lua script: %v", res))
}
return n, nil
}
// KEYS[1] -> asynq:{<qname>}:deadlines // KEYS[1] -> asynq:{<qname>}:deadlines
// ARGV[1] -> deadline in unix time // ARGV[1] -> deadline in unix time
// ARGV[2] -> task key prefix // ARGV[2] -> task key prefix
@ -910,3 +1052,13 @@ func (r *RDB) ClearSchedulerHistory(entryID string) error {
} }
return nil return nil
} }
// WriteResult writes the given result data for the specified task.
func (r *RDB) WriteResult(qname, taskID string, data []byte) (int, error) {
var op errors.Op = "rdb.WriteResult"
taskKey := base.TaskKey(qname, taskID)
if err := r.client.HSet(context.Background(), taskKey, "result", data).Err(); err != nil {
return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "hset", Err: err})
}
return len(data), nil
}
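
WriteResult is what backs the ResultWriter handed to handlers; a minimal handler sketch (handler name and payload are illustrative) showing how result data ends up in the field read back as TaskInfo.Result while the completed task is retained:

package tasks

import (
	"context"
	"fmt"

	"github.com/hibiken/asynq"
)

// handleImageResize writes a small result blob via the task's ResultWriter,
// which persists it under the task's hash key through RDB.WriteResult.
func handleImageResize(ctx context.Context, t *asynq.Task) error {
	summary := []byte(`{"resized": 42}`)
	if _, err := t.ResultWriter().Write(summary); err != nil {
		return fmt.Errorf("could not write task result: %v", err)
	}
	return nil
}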

View File

@ -677,17 +677,17 @@ func TestDone(t *testing.T) {
Payload: nil, Payload: nil,
Timeout: 1800, Timeout: 1800,
Deadline: 0, Deadline: 0,
UniqueKey: "asynq:{default}:unique:reindex:nil", UniqueKey: "asynq:{default}:unique:b0804ec967f48520697662a204f5fe72",
Queue: "default", Queue: "default",
} }
t1Deadline := now.Unix() + t1.Timeout t1Deadline := now.Unix() + t1.Timeout
t2Deadline := t2.Deadline t2Deadline := t2.Deadline
t3Deadline := now.Unix() + t3.Deadline t3Deadline := now.Unix() + t3.Timeout
tests := []struct { tests := []struct {
desc string desc string
active map[string][]*base.TaskMessage // initial state of the active list active map[string][]*base.TaskMessage // initial state of the active list
deadlines map[string][]base.Z // initial state of deadlines set deadlines map[string][]base.Z // initial state of the deadlines set
target *base.TaskMessage // task to remove target *base.TaskMessage // task to remove
wantActive map[string][]*base.TaskMessage // final state of the active list wantActive map[string][]*base.TaskMessage // final state of the active list
wantDeadlines map[string][]base.Z // final state of the deadline set wantDeadlines map[string][]base.Z // final state of the deadline set
@ -804,6 +804,201 @@ func TestDone(t *testing.T) {
} }
} }
func TestMarkAsComplete(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
t1 := &base.TaskMessage{
ID: uuid.NewString(),
Type: "send_email",
Payload: nil,
Timeout: 1800,
Deadline: 0,
Queue: "default",
Retention: 3600,
}
t2 := &base.TaskMessage{
ID: uuid.NewString(),
Type: "export_csv",
Payload: nil,
Timeout: 0,
Deadline: now.Add(2 * time.Hour).Unix(),
Queue: "custom",
Retention: 7200,
}
t3 := &base.TaskMessage{
ID: uuid.NewString(),
Type: "reindex",
Payload: nil,
Timeout: 1800,
Deadline: 0,
UniqueKey: "asynq:{default}:unique:b0804ec967f48520697662a204f5fe72",
Queue: "default",
Retention: 1800,
}
t1Deadline := now.Unix() + t1.Timeout
t2Deadline := t2.Deadline
t3Deadline := now.Unix() + t3.Timeout
tests := []struct {
desc string
active map[string][]*base.TaskMessage // initial state of the active list
deadlines map[string][]base.Z // initial state of the deadlines set
completed map[string][]base.Z // initial state of the completed set
target *base.TaskMessage // task to mark as completed
wantActive map[string][]*base.TaskMessage // final state of the active list
wantDeadlines map[string][]base.Z // final state of the deadline set
wantCompleted func(completedAt time.Time) map[string][]base.Z // final state of the completed set
}{
{
desc: "select a message from the correct queue",
active: map[string][]*base.TaskMessage{
"default": {t1},
"custom": {t2},
},
deadlines: map[string][]base.Z{
"default": {{Message: t1, Score: t1Deadline}},
"custom": {{Message: t2, Score: t2Deadline}},
},
completed: map[string][]base.Z{
"default": {},
"custom": {},
},
target: t1,
wantActive: map[string][]*base.TaskMessage{
"default": {},
"custom": {t2},
},
wantDeadlines: map[string][]base.Z{
"default": {},
"custom": {{Message: t2, Score: t2Deadline}},
},
wantCompleted: func(completedAt time.Time) map[string][]base.Z {
return map[string][]base.Z{
"default": {{Message: h.TaskMessageWithCompletedAt(*t1, completedAt), Score: completedAt.Unix() + t1.Retention}},
"custom": {},
}
},
},
{
desc: "with one queue",
active: map[string][]*base.TaskMessage{
"default": {t1},
},
deadlines: map[string][]base.Z{
"default": {{Message: t1, Score: t1Deadline}},
},
completed: map[string][]base.Z{
"default": {},
},
target: t1,
wantActive: map[string][]*base.TaskMessage{
"default": {},
},
wantDeadlines: map[string][]base.Z{
"default": {},
},
wantCompleted: func(completedAt time.Time) map[string][]base.Z {
return map[string][]base.Z{
"default": {{Message: h.TaskMessageWithCompletedAt(*t1, completedAt), Score: completedAt.Unix() + t1.Retention}},
}
},
},
{
desc: "with multiple messages in a queue",
active: map[string][]*base.TaskMessage{
"default": {t1, t3},
"custom": {t2},
},
deadlines: map[string][]base.Z{
"default": {{Message: t1, Score: t1Deadline}, {Message: t3, Score: t3Deadline}},
"custom": {{Message: t2, Score: t2Deadline}},
},
completed: map[string][]base.Z{
"default": {},
"custom": {},
},
target: t3,
wantActive: map[string][]*base.TaskMessage{
"default": {t1},
"custom": {t2},
},
wantDeadlines: map[string][]base.Z{
"default": {{Message: t1, Score: t1Deadline}},
"custom": {{Message: t2, Score: t2Deadline}},
},
wantCompleted: func(completedAt time.Time) map[string][]base.Z {
return map[string][]base.Z{
"default": {{Message: h.TaskMessageWithCompletedAt(*t3, completedAt), Score: completedAt.Unix() + t3.Retention}},
"custom": {},
}
},
},
}
for _, tc := range tests {
h.FlushDB(t, r.client) // clean up db before each test case
h.SeedAllDeadlines(t, r.client, tc.deadlines)
h.SeedAllActiveQueues(t, r.client, tc.active)
h.SeedAllCompletedQueues(t, r.client, tc.completed)
for _, msgs := range tc.active {
for _, msg := range msgs {
// Set uniqueness lock if unique key is present.
if len(msg.UniqueKey) > 0 {
err := r.client.SetNX(context.Background(), msg.UniqueKey, msg.ID, time.Minute).Err()
if err != nil {
t.Fatal(err)
}
}
}
}
completedAt := time.Now()
err := r.MarkAsComplete(tc.target)
if err != nil {
t.Errorf("%s; (*RDB).MarkAsCompleted(task) = %v, want nil", tc.desc, err)
continue
}
for queue, want := range tc.wantActive {
gotActive := h.GetActiveMessages(t, r.client, queue)
if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got):\n%s", tc.desc, base.ActiveKey(queue), diff)
continue
}
}
for queue, want := range tc.wantDeadlines {
gotDeadlines := h.GetDeadlinesEntries(t, r.client, queue)
if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got):\n%s", tc.desc, base.DeadlinesKey(queue), diff)
continue
}
}
for queue, want := range tc.wantCompleted(completedAt) {
gotCompleted := h.GetCompletedEntries(t, r.client, queue)
if diff := cmp.Diff(want, gotCompleted, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got):\n%s", tc.desc, base.CompletedKey(queue), diff)
continue
}
}
processedKey := base.ProcessedKey(tc.target.Queue, time.Now())
gotProcessed := r.client.Get(context.Background(), processedKey).Val()
if gotProcessed != "1" {
t.Errorf("%s; GET %q = %q, want 1", tc.desc, processedKey, gotProcessed)
}
gotTTL := r.client.TTL(context.Background(), processedKey).Val()
if gotTTL > statsTTL {
t.Errorf("%s; TTL %q = %v, want less than or equal to %v", tc.desc, processedKey, gotTTL, statsTTL)
}
if len(tc.target.UniqueKey) > 0 && r.client.Exists(context.Background(), tc.target.UniqueKey).Val() != 0 {
t.Errorf("%s; Uniqueness lock %q still exists", tc.desc, tc.target.UniqueKey)
}
}
}
func TestRequeue(t *testing.T) { func TestRequeue(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close() defer r.Close()
@ -1887,6 +2082,93 @@ func TestForwardIfReady(t *testing.T) {
} }
} }
func newCompletedTask(qname, typename string, payload []byte, completedAt time.Time) *base.TaskMessage {
msg := h.NewTaskMessageWithQueue(typename, payload, qname)
msg.CompletedAt = completedAt.Unix()
return msg
}
func TestDeleteExpiredCompletedTasks(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
secondAgo := now.Add(-time.Second)
hourFromNow := now.Add(time.Hour)
hourAgo := now.Add(-time.Hour)
minuteAgo := now.Add(-time.Minute)
t1 := newCompletedTask("default", "task1", nil, hourAgo)
t2 := newCompletedTask("default", "task2", nil, minuteAgo)
t3 := newCompletedTask("default", "task3", nil, secondAgo)
t4 := newCompletedTask("critical", "critical_task", nil, hourAgo)
t5 := newCompletedTask("low", "low_priority_task", nil, hourAgo)
tests := []struct {
desc string
completed map[string][]base.Z
qname string
wantCompleted map[string][]base.Z
}{
{
desc: "deletes expired task from default queue",
completed: map[string][]base.Z{
"default": {
{Message: t1, Score: secondAgo.Unix()},
{Message: t2, Score: hourFromNow.Unix()},
{Message: t3, Score: now.Unix()},
},
},
qname: "default",
wantCompleted: map[string][]base.Z{
"default": {
{Message: t2, Score: hourFromNow.Unix()},
},
},
},
{
desc: "deletes expired task from specified queue",
completed: map[string][]base.Z{
"default": {
{Message: t2, Score: secondAgo.Unix()},
},
"critical": {
{Message: t4, Score: secondAgo.Unix()},
},
"low": {
{Message: t5, Score: now.Unix()},
},
},
qname: "critical",
wantCompleted: map[string][]base.Z{
"default": {
{Message: t2, Score: secondAgo.Unix()},
},
"critical": {},
"low": {
{Message: t5, Score: now.Unix()},
},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r.client)
h.SeedAllCompletedQueues(t, r.client, tc.completed)
if err := r.DeleteExpiredCompletedTasks(tc.qname); err != nil {
t.Errorf("DeleteExpiredCompletedTasks(%q) failed: %v", tc.qname, err)
continue
}
for qname, want := range tc.wantCompleted {
got := h.GetCompletedEntries(t, r.client, qname)
if diff := cmp.Diff(want, got, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s: diff found in %q completed set: want=%v, got=%v\n%s", tc.desc, qname, want, got, diff)
}
}
}
}
func TestListDeadlineExceeded(t *testing.T) { func TestListDeadlineExceeded(t *testing.T) {
t1 := h.NewTaskMessageWithQueue("task1", nil, "default") t1 := h.NewTaskMessageWithQueue("task1", nil, "default")
t2 := h.NewTaskMessageWithQueue("task2", nil, "default") t2 := h.NewTaskMessageWithQueue("task2", nil, "default")
@ -2291,3 +2573,39 @@ func TestCancelationPubSub(t *testing.T) {
} }
mu.Unlock() mu.Unlock()
} }
func TestWriteResult(t *testing.T) {
r := setup(t)
defer r.Close()
tests := []struct {
qname string
taskID string
data []byte
}{
{
qname: "default",
taskID: uuid.NewString(),
data: []byte("hello"),
},
}
for _, tc := range tests {
h.FlushDB(t, r.client)
n, err := r.WriteResult(tc.qname, tc.taskID, tc.data)
if err != nil {
t.Errorf("WriteResult failed: %v", err)
continue
}
if n != len(tc.data) {
t.Errorf("WriteResult returned %d, want %d", n, len(tc.data))
}
taskKey := base.TaskKey(tc.qname, tc.taskID)
got := r.client.HGet(context.Background(), taskKey, "result").Val()
if got != string(tc.data) {
t.Errorf("`result` field under %q key is set to %q, want %q", taskKey, got, string(tc.data))
}
}
}

View File

@ -81,6 +81,15 @@ func (tb *TestBroker) Done(msg *base.TaskMessage) error {
return tb.real.Done(msg) return tb.real.Done(msg)
} }
func (tb *TestBroker) MarkAsComplete(msg *base.TaskMessage) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.MarkAsComplete(msg)
}
func (tb *TestBroker) Requeue(msg *base.TaskMessage) error { func (tb *TestBroker) Requeue(msg *base.TaskMessage) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
@ -135,6 +144,15 @@ func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
return tb.real.ForwardIfReady(qnames...) return tb.real.ForwardIfReady(qnames...)
} }
func (tb *TestBroker) DeleteExpiredCompletedTasks(qname string) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.DeleteExpiredCompletedTasks(qname)
}
func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) { func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
@ -180,6 +198,15 @@ func (tb *TestBroker) PublishCancelation(id string) error {
return tb.real.PublishCancelation(id) return tb.real.PublishCancelation(id)
} }
func (tb *TestBroker) WriteResult(qname, id string, data []byte) (int, error) {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return 0, errRedisDown
}
return tb.real.WriteResult(qname, id, data)
}
func (tb *TestBroker) Ping() error { func (tb *TestBroker) Ping() error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()

81
janitor.go Normal file
View File

@ -0,0 +1,81 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// A janitor is responsible for deleting expired completed tasks from the specified
// queues. It periodically checks for any expired tasks in the completed set, and
// deletes them.
type janitor struct {
logger *log.Logger
broker base.Broker
// channel to communicate back to the long-running "janitor" goroutine.
done chan struct{}
// list of queue names to check.
queues []string
// average interval between checks.
avgInterval time.Duration
}
type janitorParams struct {
logger *log.Logger
broker base.Broker
queues []string
interval time.Duration
}
func newJanitor(params janitorParams) *janitor {
return &janitor{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
queues: params.queues,
avgInterval: params.interval,
}
}
func (j *janitor) shutdown() {
j.logger.Debug("Janitor shutting down...")
// Signal the janitor goroutine to stop.
j.done <- struct{}{}
}
// start starts the "janitor" goroutine.
func (j *janitor) start(wg *sync.WaitGroup) {
wg.Add(1)
timer := time.NewTimer(j.avgInterval) // randomize this interval with a margin of 1s
go func() {
defer wg.Done()
for {
select {
case <-j.done:
j.logger.Debug("Janitor done")
return
case <-timer.C:
j.exec()
timer.Reset(j.avgInterval)
}
}
}()
}
func (j *janitor) exec() {
for _, qname := range j.queues {
if err := j.broker.DeleteExpiredCompletedTasks(qname); err != nil {
j.logger.Errorf("Could not delete expired completed tasks from queue %q: %v",
qname, err)
}
}
}
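
The deletion timing follows from the score written by MarkAsComplete: a completed task becomes eligible once CompletedAt plus Retention has passed, and is removed on the janitor's next pass (the server below configures an 8-second interval). A small illustrative helper (function and package names are assumptions):

package example

import (
	"time"

	"github.com/hibiken/asynq/internal/base"
)

// completedTaskExpireAt reports when a completed task becomes eligible for
// deletion by the janitor: CompletedAt plus the retention period, which is
// also the score MarkAsComplete stores in the completed set.
func completedTaskExpireAt(msg *base.TaskMessage) time.Time {
	return time.Unix(msg.CompletedAt+msg.Retention, 0)
}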

89
janitor_test.go Normal file
View File

@ -0,0 +1,89 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
)
func newCompletedTask(qname, tasktype string, payload []byte, completedAt time.Time) *base.TaskMessage {
msg := h.NewTaskMessageWithQueue(tasktype, payload, qname)
msg.CompletedAt = completedAt.Unix()
return msg
}
func TestJanitor(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
const interval = 1 * time.Second
janitor := newJanitor(janitorParams{
logger: testLogger,
broker: rdbClient,
queues: []string{"default", "custom"},
interval: interval,
})
now := time.Now()
hourAgo := now.Add(-1 * time.Hour)
minuteAgo := now.Add(-1 * time.Minute)
halfHourAgo := now.Add(-30 * time.Minute)
halfHourFromNow := now.Add(30 * time.Minute)
fiveMinFromNow := now.Add(5 * time.Minute)
msg1 := newCompletedTask("default", "task1", nil, hourAgo)
msg2 := newCompletedTask("default", "task2", nil, minuteAgo)
msg3 := newCompletedTask("custom", "task3", nil, hourAgo)
msg4 := newCompletedTask("custom", "task4", nil, minuteAgo)
tests := []struct {
completed map[string][]base.Z // initial completed sets
wantCompleted map[string][]base.Z // expected completed sets after janitor runs
}{
{
completed: map[string][]base.Z{
"default": {
{Message: msg1, Score: halfHourAgo.Unix()},
{Message: msg2, Score: fiveMinFromNow.Unix()},
},
"custom": {
{Message: msg3, Score: halfHourFromNow.Unix()},
{Message: msg4, Score: minuteAgo.Unix()},
},
},
wantCompleted: map[string][]base.Z{
"default": {
{Message: msg2, Score: fiveMinFromNow.Unix()},
},
"custom": {
{Message: msg3, Score: halfHourFromNow.Unix()},
},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)
var wg sync.WaitGroup
janitor.start(&wg)
time.Sleep(2 * interval) // give the janitor enough time to run at least once
janitor.shutdown()
for qname, want := range tc.wantCompleted {
got := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, got, h.SortZSetEntryOpt); diff != "" {
t.Errorf("diff found in %q after running janitor: (-want, +got)\n%s", base.CompletedKey(qname), diff)
}
}
}
}

View File

@ -201,14 +201,24 @@ func (p *processor) exec() {
select { select {
case <-ctx.Done(): case <-ctx.Done():
// already canceled (e.g. deadline exceeded). // already canceled (e.g. deadline exceeded).
p.retryOrArchive(ctx, msg, ctx.Err()) p.handleFailedMessage(ctx, msg, ctx.Err())
return return
default: default:
} }
resCh := make(chan error, 1) resCh := make(chan error, 1)
go func() { go func() {
resCh <- p.perform(ctx, NewTask(msg.Type, msg.Payload)) task := newTask(
msg.Type,
msg.Payload,
&ResultWriter{
id: msg.ID,
qname: msg.Queue,
broker: p.broker,
ctx: ctx,
},
)
resCh <- p.perform(ctx, task)
}() }()
select { select {
@ -218,18 +228,14 @@ func (p *processor) exec() {
p.requeue(msg) p.requeue(msg)
return return
case <-ctx.Done(): case <-ctx.Done():
p.retryOrArchive(ctx, msg, ctx.Err()) p.handleFailedMessage(ctx, msg, ctx.Err())
return return
case resErr := <-resCh: case resErr := <-resCh:
// Note: One of three things should happen.
// 1) Done -> Removes the message from Active
// 2) Retry -> Removes the message from Active & Adds the message to Retry
// 3) Archive -> Removes the message from Active & Adds the message to archive
if resErr != nil { if resErr != nil {
p.retryOrArchive(ctx, msg, resErr) p.handleFailedMessage(ctx, msg, resErr)
return return
} }
p.markAsDone(ctx, msg) p.handleSucceededMessage(ctx, msg)
} }
}() }()
} }
@ -244,6 +250,34 @@ func (p *processor) requeue(msg *base.TaskMessage) {
} }
} }
func (p *processor) handleSucceededMessage(ctx context.Context, msg *base.TaskMessage) {
if msg.Retention > 0 {
p.markAsComplete(ctx, msg)
} else {
p.markAsDone(ctx, msg)
}
}
func (p *processor) markAsComplete(ctx context.Context, msg *base.TaskMessage) {
err := p.broker.MarkAsComplete(msg)
if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s type=%q from %q to %q: %+v",
msg.ID, msg.Type, base.ActiveKey(msg.Queue), base.CompletedKey(msg.Queue), err)
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
}
p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{
fn: func() error {
return p.broker.MarkAsComplete(msg)
},
errMsg: errMsg,
deadline: deadline,
}
}
}
func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) { func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
err := p.broker.Done(msg) err := p.broker.Done(msg)
if err != nil { if err != nil {
@ -267,7 +301,7 @@ func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
// the task should not be retried and should be archived instead. // the task should not be retried and should be archived instead.
var SkipRetry = errors.New("skip retry for the task") var SkipRetry = errors.New("skip retry for the task")
func (p *processor) retryOrArchive(ctx context.Context, msg *base.TaskMessage, err error) { func (p *processor) handleFailedMessage(ctx context.Context, msg *base.TaskMessage, err error) {
if p.errHandler != nil { if p.errHandler != nil {
p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err) p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
} }

View File

@ -14,11 +14,18 @@ import (
"time" "time"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
h "github.com/hibiken/asynq/internal/asynqtest" h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
) )
var taskCmpOpts = []cmp.Option{
sortTaskOpt, // sort the tasks
cmp.AllowUnexported(Task{}), // allow typename, payload fields to be compared
cmpopts.IgnoreFields(Task{}, "opts", "w"), // ignore opts, w fields
}
// fakeHeartbeater receives from starting and finished channels and do nothing. // fakeHeartbeater receives from starting and finished channels and do nothing.
func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) { func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) {
for { for {
@ -42,6 +49,34 @@ func fakeSyncer(syncCh <-chan *syncRequest, done <-chan struct{}) {
} }
} }
// Returns a processor instance configured for testing purpose.
func newProcessorForTest(t *testing.T, r *rdb.RDB, h Handler) *processor {
starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
t.Cleanup(func() { close(done) })
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: r,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = h
return p
}
func TestProcessorSuccessWithSingleQueue(t *testing.T) { func TestProcessorSuccessWithSingleQueue(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close() defer r.Close()
@ -87,29 +122,7 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
processed = append(processed, task) processed = append(processed, task)
return nil return nil
} }
starting := make(chan *workerInfo) p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = HandlerFunc(handler)
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
for _, msg := range tc.incoming { for _, msg := range tc.incoming {
@ -126,7 +139,7 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
p.shutdown() p.shutdown()
mu.Lock() mu.Lock()
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" { if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff) t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
} }
mu.Unlock() mu.Unlock()
@ -180,33 +193,12 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
processed = append(processed, task) processed = append(processed, task)
return nil return nil
} }
starting := make(chan *workerInfo) p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
finished := make(chan *base.TaskMessage) p.queueConfig = map[string]int{
syncCh := make(chan *syncRequest)
done := make(chan struct{})
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: map[string]int{
"default": 2, "default": 2,
"high": 3, "high": 3,
"low": 1, "low": 1,
}, }
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = HandlerFunc(handler)
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
// Wait for two second to allow all pending tasks to be processed. // Wait for two second to allow all pending tasks to be processed.
@ -220,7 +212,7 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
p.shutdown() p.shutdown()
mu.Lock() mu.Lock()
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" { if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff) t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
} }
mu.Unlock() mu.Unlock()
@ -267,29 +259,7 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
processed = append(processed, task) processed = append(processed, task)
return nil return nil
} }
starting := make(chan *workerInfo) p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = HandlerFunc(handler)
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
time.Sleep(2 * time.Second) // wait for two second to allow all pending tasks to be processed. time.Sleep(2 * time.Second) // wait for two second to allow all pending tasks to be processed.
@ -299,7 +269,7 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
p.shutdown() p.shutdown()
mu.Lock() mu.Lock()
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" { if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff) t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
} }
mu.Unlock() mu.Unlock()
@ -389,27 +359,9 @@ func TestProcessorRetry(t *testing.T) {
defer mu.Unlock() defer mu.Unlock()
n++ n++
} }
starting := make(chan *workerInfo) p := newProcessorForTest(t, rdbClient, tc.handler)
finished := make(chan *base.TaskMessage) p.errHandler = ErrorHandlerFunc(errHandler)
done := make(chan struct{}) p.retryDelayFunc = delayFunc
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: delayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: nil,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: ErrorHandlerFunc(errHandler),
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = tc.handler
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
runTime := time.Now() // time when processor is running runTime := time.Now() // time when processor is running
@ -453,6 +405,81 @@ func TestProcessorRetry(t *testing.T) {
} }
} }
func TestProcessorMarkAsComplete(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
msg1 := h.NewTaskMessage("one", nil)
msg2 := h.NewTaskMessage("two", nil)
msg3 := h.NewTaskMessageWithQueue("three", nil, "custom")
msg1.Retention = 3600
msg3.Retention = 7200
handler := func(ctx context.Context, task *Task) error { return nil }
tests := []struct {
pending map[string][]*base.TaskMessage
completed map[string][]base.Z
queueCfg map[string]int
wantPending map[string][]*base.TaskMessage
wantCompleted func(completedAt time.Time) map[string][]base.Z
}{
{
pending: map[string][]*base.TaskMessage{
"default": {msg1, msg2},
"custom": {msg3},
},
completed: map[string][]base.Z{
"default": {},
"custom": {},
},
queueCfg: map[string]int{
"default": 1,
"custom": 1,
},
wantPending: map[string][]*base.TaskMessage{
"default": {},
"custom": {},
},
wantCompleted: func(completedAt time.Time) map[string][]base.Z {
return map[string][]base.Z{
"default": {{Message: h.TaskMessageWithCompletedAt(*msg1, completedAt), Score: completedAt.Unix() + msg1.Retention}},
"custom": {{Message: h.TaskMessageWithCompletedAt(*msg3, completedAt), Score: completedAt.Unix() + msg3.Retention}},
}
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllPendingQueues(t, r, tc.pending)
h.SeedAllCompletedQueues(t, r, tc.completed)
p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
p.queueConfig = tc.queueCfg
p.start(&sync.WaitGroup{})
runTime := time.Now() // time when processor is running
time.Sleep(2 * time.Second)
p.shutdown()
for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("diff found in %q pending set; want=%v, got=%v\n%s", qname, want, gotPending, diff)
}
}
for qname, want := range tc.wantCompleted(runTime) {
gotCompleted := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, gotCompleted, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("diff found in %q completed set; want=%v, got=%v\n%s", qname, want, gotCompleted, diff)
}
}
}
}
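The test above exercises the new retention path: when a handler returns nil and the message carries a non-zero Retention, the processor moves the task into the completed set scored at completedAt + retention instead of deleting it. A minimal producer-side sketch of how a client opts in; the task type name and Redis address are illustrative placeholders, not part of this commit.

// Sketch: enqueue a task whose record is retained for 24h after successful processing.
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	info, err := client.Enqueue(
		asynq.NewTask("email:welcome", nil),
		asynq.Retention(24*time.Hour),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("enqueued task %s; it stays inspectable for 24h after completion", info.ID)
}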
func TestProcessorQueues(t *testing.T) { func TestProcessorQueues(t *testing.T) {
sortOpt := cmp.Transformer("SortStrings", func(in []string) []string { sortOpt := cmp.Transformer("SortStrings", func(in []string) []string {
out := append([]string(nil), in...) // Copy input to avoid mutating it out := append([]string(nil), in...) // Copy input to avoid mutating it
@ -481,26 +508,10 @@ func TestProcessorQueues(t *testing.T) {
} }
for _, tc := range tests { for _, tc := range tests {
starting := make(chan *workerInfo) // Note: rdb and handler not needed for this test.
finished := make(chan *base.TaskMessage) p := newProcessorForTest(t, nil, nil)
done := make(chan struct{}) p.queueConfig = tc.queueCfg
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: nil,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: nil,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: tc.queueCfg,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
got := p.queues() got := p.queues()
if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" { if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s", t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s",
@ -605,7 +616,7 @@ func TestProcessorWithStrictPriority(t *testing.T) {
} }
p.shutdown() p.shutdown()
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" { if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff) t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
} }
@ -644,12 +655,9 @@ func TestProcessorPerform(t *testing.T) {
wantErr: true, wantErr: true,
}, },
} }
// Note: We don't need to fully initialize the processor since we are only testing // Note: We don't need to fully initialize the processor since we are only testing
// perform method. // perform method.
p := newProcessor(processorParams{ p := newProcessorForTest(t, nil, nil)
logger: testLogger,
queues: defaultQueueConfig,
})
for _, tc := range tests { for _, tc := range tests {
p.handler = tc.handler p.handler = tc.handler
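The hunks above swap the verbose newProcessor(processorParams{...}) construction for a newProcessorForTest helper whose definition is not shown in this excerpt. A rough sketch of what such a helper presumably wraps, reconstructed from the removed lines; treat the exact signature and defaults as assumptions.

// Hypothetical sketch of the test helper introduced by this commit; the real
// definition in processor_test.go may differ in detail.
func newProcessorForTest(t *testing.T, r *rdb.RDB, h Handler) *processor {
	starting := make(chan *workerInfo)
	finished := make(chan *base.TaskMessage)
	done := make(chan struct{})
	t.Cleanup(func() { close(done) })
	go fakeHeartbeater(starting, finished, done)
	p := newProcessor(processorParams{
		logger:          testLogger,
		broker:          r,
		retryDelayFunc:  DefaultRetryDelayFunc,
		isFailureFunc:   defaultIsFailureFunc,
		syncCh:          nil,
		cancelations:    base.NewCancelations(),
		concurrency:     10,
		queues:          defaultQueueConfig,
		strictPriority:  false,
		errHandler:      nil,
		shutdownTimeout: defaultShutdownTimeout,
		starting:        starting,
		finished:        finished,
	})
	p.handler = h
	return p
}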

View File

@ -49,6 +49,7 @@ type Server struct {
subscriber *subscriber subscriber *subscriber
recoverer *recoverer recoverer *recoverer
healthchecker *healthchecker healthchecker *healthchecker
janitor *janitor
} }
// Config specifies the server's background-task processing behavior. // Config specifies the server's background-task processing behavior.
@ -401,6 +402,12 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
interval: healthcheckInterval, interval: healthcheckInterval,
healthcheckFunc: cfg.HealthCheckFunc, healthcheckFunc: cfg.HealthCheckFunc,
}) })
janitor := newJanitor(janitorParams{
logger: logger,
broker: rdb,
queues: qnames,
interval: 8 * time.Second,
})
return &Server{ return &Server{
logger: logger, logger: logger,
broker: rdb, broker: rdb,
@ -412,6 +419,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
subscriber: subscriber, subscriber: subscriber,
recoverer: recoverer, recoverer: recoverer,
healthchecker: healthchecker, healthchecker: healthchecker,
janitor: janitor,
} }
} }
@ -493,6 +501,7 @@ func (srv *Server) Start(handler Handler) error {
srv.recoverer.start(&srv.wg) srv.recoverer.start(&srv.wg)
srv.forwarder.start(&srv.wg) srv.forwarder.start(&srv.wg)
srv.processor.start(&srv.wg) srv.processor.start(&srv.wg)
srv.janitor.start(&srv.wg)
return nil return nil
} }
@ -517,6 +526,7 @@ func (srv *Server) Shutdown() {
srv.recoverer.shutdown() srv.recoverer.shutdown()
srv.syncer.shutdown() srv.syncer.shutdown()
srv.subscriber.shutdown() srv.subscriber.shutdown()
srv.janitor.shutdown()
srv.healthchecker.shutdown() srv.healthchecker.shutdown()
srv.heartbeater.shutdown() srv.heartbeater.shutdown()
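The janitor wired into Start and Shutdown above is the goroutine that evicts completed tasks once their retention expires; its implementation is not part of this excerpt. A rough sketch of the expected loop, using the janitorParams fields shown earlier plus an assumed done channel and an assumed broker method named DeleteExpiredCompletedTasks:

// Rough sketch only; the real janitor.go in this commit may differ.
func (j *janitor) start(wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		timer := time.NewTimer(j.interval)
		for {
			select {
			case <-j.done:
				j.logger.Debug("Janitor done")
				return
			case <-timer.C:
				for _, qname := range j.queues {
					if err := j.broker.DeleteExpiredCompletedTasks(qname); err != nil {
						j.logger.Errorf("Failed to delete expired completed tasks from queue %q: %v", qname, err)
					}
				}
				timer.Reset(j.interval)
			}
		}
	}()
}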

View File

@ -63,7 +63,7 @@ func cronList(cmd *cobra.Command, args []string) {
cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"} cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
printRows := func(w io.Writer, tmpl string) { printRows := func(w io.Writer, tmpl string) {
for _, e := range entries { for _, e := range entries {
fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), formatPayload(e.Task.Payload()), e.Opts, fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), sprintBytes(e.Task.Payload()), e.Opts,
nextEnqueue(e.Next), prevEnqueue(e.Prev)) nextEnqueue(e.Next), prevEnqueue(e.Prev))
} }
} }

View File

@ -1,405 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"time"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
)
// migrateCmd represents the migrate command.
var migrateCmd = &cobra.Command{
Use: "migrate",
Short: fmt.Sprintf("Migrate existing tasks and queues to be asynq%s compatible", base.Version),
Long: `Migrate (asynq migrate) will migrate existing tasks and queues in redis to be compatible with the latest version of asynq.
`,
Args: cobra.NoArgs,
Run: migrate,
}
func init() {
rootCmd.AddCommand(migrateCmd)
}
func backupKey(key string) string {
return fmt.Sprintf("%s:backup", key)
}
func renameKeyAsBackup(c redis.UniversalClient, key string) error {
if c.Exists(context.Background(), key).Val() == 0 {
return nil // key doesn't exist; no-op
}
return c.Rename(context.Background(), key, backupKey(key)).Err()
}
func failIfError(err error, msg string) {
if err != nil {
fmt.Printf("error: %s: %v\n", msg, err)
fmt.Println("*** Please report this issue at https://github.com/hibiken/asynq/issues ***")
os.Exit(1)
}
}
func logIfError(err error, msg string) {
if err != nil {
fmt.Printf("warning: %s: %v\n", msg, err)
}
}
func migrate(cmd *cobra.Command, args []string) {
r := createRDB()
queues, err := r.AllQueues()
failIfError(err, "Failed to get queue names")
// ---------------------------------------------
// Pre-check: Ensure no active servers, tasks.
// ---------------------------------------------
srvs, err := r.ListServers()
failIfError(err, "Failed to get server infos")
if len(srvs) > 0 {
fmt.Println("(error): Server(s) still running. Please ensure that no asynq servers are running when runnning migrate command.")
os.Exit(1)
}
for _, qname := range queues {
stats, err := r.CurrentStats(qname)
failIfError(err, "Failed to get stats")
if stats.Active > 0 {
fmt.Printf("(error): %d active tasks found. Please ensure that no active tasks exist when running migrate command.\n", stats.Active)
os.Exit(1)
}
}
// ---------------------------------------------
// Rename pending key
// ---------------------------------------------
fmt.Print("Renaming pending keys...")
for _, qname := range queues {
oldKey := fmt.Sprintf("asynq:{%s}", qname)
if r.Client().Exists(context.Background(), oldKey).Val() == 0 {
continue
}
newKey := base.PendingKey(qname)
err := r.Client().Rename(context.Background(), oldKey, newKey).Err()
failIfError(err, "Failed to rename key")
}
fmt.Print("Done\n")
// ---------------------------------------------
// Rename keys as backup
// ---------------------------------------------
fmt.Print("Renaming keys for backup...")
for _, qname := range queues {
keys := []string{
base.ActiveKey(qname),
base.PendingKey(qname),
base.ScheduledKey(qname),
base.RetryKey(qname),
base.ArchivedKey(qname),
}
for _, key := range keys {
err := renameKeyAsBackup(r.Client(), key)
failIfError(err, fmt.Sprintf("Failed to rename key %q for backup", key))
}
}
fmt.Print("Done\n")
// ---------------------------------------------
// Update to new schema
// ---------------------------------------------
fmt.Print("Updating to new schema...")
for _, qname := range queues {
updatePendingMessages(r, qname)
updateZSetMessages(r.Client(), base.ScheduledKey(qname), "scheduled")
updateZSetMessages(r.Client(), base.RetryKey(qname), "retry")
updateZSetMessages(r.Client(), base.ArchivedKey(qname), "archived")
}
fmt.Print("Done\n")
// ---------------------------------------------
// Delete backup keys
// ---------------------------------------------
fmt.Print("Deleting backup keys...")
for _, qname := range queues {
keys := []string{
backupKey(base.ActiveKey(qname)),
backupKey(base.PendingKey(qname)),
backupKey(base.ScheduledKey(qname)),
backupKey(base.RetryKey(qname)),
backupKey(base.ArchivedKey(qname)),
}
for _, key := range keys {
err := r.Client().Del(context.Background(), key).Err()
failIfError(err, "Failed to delete backup key")
}
}
fmt.Print("Done\n")
}
func UnmarshalOldMessage(encoded string) (*base.TaskMessage, error) {
oldMsg, err := DecodeMessage(encoded)
if err != nil {
return nil, err
}
payload, err := json.Marshal(oldMsg.Payload)
if err != nil {
return nil, fmt.Errorf("could not marshal payload: %v", err)
}
return &base.TaskMessage{
Type: oldMsg.Type,
Payload: payload,
ID: oldMsg.ID,
Queue: oldMsg.Queue,
Retry: oldMsg.Retry,
Retried: oldMsg.Retried,
ErrorMsg: oldMsg.ErrorMsg,
LastFailedAt: 0,
Timeout: oldMsg.Timeout,
Deadline: oldMsg.Deadline,
UniqueKey: oldMsg.UniqueKey,
}, nil
}
// TaskMessage from v0.17
type OldTaskMessage struct {
// Type indicates the kind of the task to be performed.
Type string
// Payload holds data needed to process the task.
Payload map[string]interface{}
// ID is a unique identifier for each task.
ID uuid.UUID
// Queue is a name this message should be enqueued to.
Queue string
// Retry is the max number of retry for this task.
Retry int
// Retried is the number of times we've retried this task so far.
Retried int
// ErrorMsg holds the error message from the last failure.
ErrorMsg string
// Timeout specifies timeout in seconds.
// If task processing doesn't complete within the timeout, the task will be retried
// if retry count is remaining. Otherwise it will be moved to the archive.
//
// Use zero to indicate no timeout.
Timeout int64
// Deadline specifies the deadline for the task in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// If task processing doesn't complete before the deadline, the task will be retried
// if retry count is remaining. Otherwise it will be moved to the archive.
//
// Use zero to indicate no deadline.
Deadline int64
// UniqueKey holds the redis key used for uniqueness lock for this task.
//
// Empty string indicates that no uniqueness lock was used.
UniqueKey string
}
// DecodeMessage unmarshals the given encoded string and returns a decoded task message.
// Code from v0.17.
func DecodeMessage(s string) (*OldTaskMessage, error) {
d := json.NewDecoder(strings.NewReader(s))
d.UseNumber()
var msg OldTaskMessage
if err := d.Decode(&msg); err != nil {
return nil, err
}
return &msg, nil
}
func updatePendingMessages(r *rdb.RDB, qname string) {
data, err := r.Client().LRange(context.Background(), backupKey(base.PendingKey(qname)), 0, -1).Result()
failIfError(err, "Failed to read backup pending key")
for _, s := range data {
msg, err := UnmarshalOldMessage(s)
failIfError(err, "Failed to unmarshal message")
if msg.UniqueKey != "" {
ttl, err := r.Client().TTL(context.Background(), msg.UniqueKey).Result()
failIfError(err, "Failed to get ttl")
if ttl > 0 {
err = r.Client().Del(context.Background(), msg.UniqueKey).Err()
logIfError(err, "Failed to delete unique key")
}
// Regenerate unique key.
msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
if ttl > 0 {
err = r.EnqueueUnique(msg, ttl)
} else {
err = r.Enqueue(msg)
}
failIfError(err, "Failed to enqueue message")
} else {
err := r.Enqueue(msg)
failIfError(err, "Failed to enqueue message")
}
}
}
// KEYS[1] -> asynq:{<qname>}:t:<task_id>
// KEYS[2] -> asynq:{<qname>}:scheduled
// ARGV[1] -> task message data
// ARGV[2] -> zset score
// ARGV[3] -> task ID
// ARGV[4] -> task timeout in seconds (0 if no timeout)
// ARGV[5] -> task deadline in unix time (0 if no deadline)
// ARGV[6] -> task state (e.g. "retry", "archived")
var taskZAddCmd = redis.NewScript(`
redis.call("HSET", KEYS[1],
"msg", ARGV[1],
"state", ARGV[6],
"timeout", ARGV[4],
"deadline", ARGV[5])
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[3])
return 1
`)
// ZAddTask adds task to zset.
func ZAddTask(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string) error {
// Special case; LastFailedAt field is new so assign a value inferred from zscore.
if state == "archived" {
msg.LastFailedAt = int64(score)
}
encoded, err := base.EncodeMessage(msg)
if err != nil {
return err
}
if err := c.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
return err
}
keys := []string{
base.TaskKey(msg.Queue, msg.ID.String()),
key,
}
argv := []interface{}{
encoded,
score,
msg.ID.String(),
msg.Timeout,
msg.Deadline,
state,
}
return taskZAddCmd.Run(context.Background(), c, keys, argv...).Err()
}
// KEYS[1] -> unique key
// KEYS[2] -> asynq:{<qname>}:t:<task_id>
// KEYS[3] -> zset key (e.g. asynq:{<qname>}:scheduled)
// --
// ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> score (process_at timestamp)
// ARGV[4] -> task message
// ARGV[5] -> task timeout in seconds (0 if no timeout)
// ARGV[6] -> task deadline in unix time (0 if no deadline)
// ARGV[7] -> task state (oneof "scheduled", "retry", "archived")
var taskZAddUniqueCmd = redis.NewScript(`
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
if not ok then
return 0
end
redis.call("HSET", KEYS[2],
"msg", ARGV[4],
"state", ARGV[7],
"timeout", ARGV[5],
"deadline", ARGV[6],
"unique_key", KEYS[1])
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1])
return 1
`)
// ZAddTaskUnique adds the task to the zset to be processed in the future if the uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired.
func ZAddTaskUnique(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string, ttl time.Duration) error {
encoded, err := base.EncodeMessage(msg)
if err != nil {
return err
}
if err := c.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
return err
}
keys := []string{
msg.UniqueKey,
base.TaskKey(msg.Queue, msg.ID.String()),
key,
}
argv := []interface{}{
msg.ID.String(),
int(ttl.Seconds()),
score,
encoded,
msg.Timeout,
msg.Deadline,
state,
}
res, err := taskZAddUniqueCmd.Run(context.Background(), c, keys, argv...).Result()
if err != nil {
return err
}
n, ok := res.(int64)
if !ok {
return errors.E(errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
}
if n == 0 {
return errors.E(errors.AlreadyExists, errors.ErrDuplicateTask)
}
return nil
}
func updateZSetMessages(c redis.UniversalClient, key, state string) {
zs, err := c.ZRangeWithScores(context.Background(), backupKey(key), 0, -1).Result()
failIfError(err, "Failed to read")
for _, z := range zs {
msg, err := UnmarshalOldMessage(z.Member.(string))
failIfError(err, "Failed to unmarshal message")
if msg.UniqueKey != "" {
ttl, err := c.TTL(context.Background(), msg.UniqueKey).Result()
failIfError(err, "Failed to get ttl")
if ttl > 0 {
err = c.Del(context.Background(), msg.UniqueKey).Err()
logIfError(err, "Failed to delete unique key")
}
// Regenerate unique key.
msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
if ttl > 0 {
err = ZAddTaskUnique(c, key, msg, z.Score, state, ttl)
} else {
err = ZAddTask(c, key, msg, z.Score, state)
}
failIfError(err, "Failed to zadd message")
} else {
err := ZAddTask(c, key, msg, z.Score, state)
failIfError(err, "Failed to enqueue scheduled message")
}
}
}

View File

@ -148,9 +148,9 @@ func printQueueInfo(info *asynq.QueueInfo) {
fmt.Printf("Paused: %t\n\n", info.Paused) fmt.Printf("Paused: %t\n\n", info.Paused)
bold.Println("Task Count by State") bold.Println("Task Count by State")
printTable( printTable(
[]string{"active", "pending", "scheduled", "retry", "archived"}, []string{"active", "pending", "scheduled", "retry", "archived", "completed"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived) fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived, info.Completed)
}, },
) )
fmt.Println() fmt.Println()

View File

@ -199,9 +199,9 @@ func printTable(cols []string, printRows func(w io.Writer, tmpl string)) {
tw.Flush() tw.Flush()
} }
// formatPayload returns string representation of payload if data is printable. // sprintBytes returns a string representation of the given byte slice if data is printable.
// If data is not printable, it returns a string describing payload is not printable. // If data is not printable, it returns a string describing it is not printable.
func formatPayload(payload []byte) string { func sprintBytes(payload []byte) string {
if !isPrintable(payload) { if !isPrintable(payload) {
return "non-printable bytes" return "non-printable bytes"
} }

View File

@ -7,11 +7,13 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"math"
"os" "os"
"strconv" "strconv"
"strings" "strings"
"text/tabwriter" "text/tabwriter"
"time" "time"
"unicode/utf8"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
@ -58,6 +60,7 @@ type AggregateStats struct {
Scheduled int Scheduled int
Retry int Retry int
Archived int Archived int
Completed int
Processed int Processed int
Failed int Failed int
Timestamp time.Time Timestamp time.Time
@ -85,6 +88,7 @@ func stats(cmd *cobra.Command, args []string) {
aggStats.Scheduled += s.Scheduled aggStats.Scheduled += s.Scheduled
aggStats.Retry += s.Retry aggStats.Retry += s.Retry
aggStats.Archived += s.Archived aggStats.Archived += s.Archived
aggStats.Completed += s.Completed
aggStats.Processed += s.Processed aggStats.Processed += s.Processed
aggStats.Failed += s.Failed aggStats.Failed += s.Failed
aggStats.Timestamp = s.Timestamp aggStats.Timestamp = s.Timestamp
@ -124,22 +128,50 @@ func stats(cmd *cobra.Command, args []string) {
} }
func printStatsByState(s *AggregateStats) { func printStatsByState(s *AggregateStats) {
format := strings.Repeat("%v\t", 5) + "\n" format := strings.Repeat("%v\t", 6) + "\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0) tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived") fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived", "completed")
fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----") width := maxInt(9 /* defaultWidth */, maxWidthOf(s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)) // length of widest column
fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived) sep := strings.Repeat("-", width)
fmt.Fprintf(tw, format, sep, sep, sep, sep, sep, sep)
fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)
tw.Flush() tw.Flush()
} }
// numDigits returns the number of digits in n.
func numDigits(n int) int {
return len(strconv.Itoa(n))
}
// maxWidthOf returns the max number of digits among the provided vals.
func maxWidthOf(vals ...int) int {
max := 0
for _, v := range vals {
if vw := numDigits(v); vw > max {
max = vw
}
}
return max
}
func maxInt(a, b int) int {
return int(math.Max(float64(a), float64(b)))
}
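A quick illustration of the width logic above, assuming it runs in the same cmd package where these helpers are defined (not part of the commit):

// Illustration only: how the separator width is chosen.
func printSeparatorWidths() {
	fmt.Println(maxInt(9, maxWidthOf(3, 1200, 45, 0, 7, 98)))       // 9: the default width wins
	fmt.Println(maxInt(9, maxWidthOf(3, 1234567890, 45, 0, 7, 98))) // 10: the widest count wins
}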
func printStatsByQueue(stats []*rdb.Stats) { func printStatsByQueue(stats []*rdb.Stats) {
var headers, seps, counts []string var headers, seps, counts []string
maxHeaderWidth := 0
for _, s := range stats { for _, s := range stats {
title := queueTitle(s) title := queueTitle(s)
headers = append(headers, title) headers = append(headers, title)
seps = append(seps, strings.Repeat("-", len(title))) if w := utf8.RuneCountInString(title); w > maxHeaderWidth {
maxHeaderWidth = w
}
counts = append(counts, strconv.Itoa(s.Size)) counts = append(counts, strconv.Itoa(s.Size))
} }
for i := 0; i < len(headers); i++ {
seps = append(seps, strings.Repeat("-", maxHeaderWidth))
}
format := strings.Repeat("%v\t", len(headers)) + "\n" format := strings.Repeat("%v\t", len(headers)) + "\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0) tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
fmt.Fprintf(tw, format, toInterfaceSlice(headers)...) fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)

View File

@ -86,6 +86,7 @@ The value for the state flag should be one of:
- scheduled - scheduled
- retry - retry
- archived - archived
- completed
List operation paginates the result set. List operation paginates the result set.
By default, the command fetches the first 30 tasks. By default, the command fetches the first 30 tasks.
@ -189,6 +190,8 @@ func taskList(cmd *cobra.Command, args []string) {
listRetryTasks(qname, pageNum, pageSize) listRetryTasks(qname, pageNum, pageSize)
case "archived": case "archived":
listArchivedTasks(qname, pageNum, pageSize) listArchivedTasks(qname, pageNum, pageSize)
case "completed":
listCompletedTasks(qname, pageNum, pageSize)
default: default:
fmt.Printf("error: state=%q is not supported\n", state) fmt.Printf("error: state=%q is not supported\n", state)
os.Exit(1) os.Exit(1)
@ -210,7 +213,7 @@ func listActiveTasks(qname string, pageNum, pageSize int) {
[]string{"ID", "Type", "Payload"}, []string{"ID", "Type", "Payload"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload)) fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
} }
}, },
) )
@ -231,7 +234,7 @@ func listPendingTasks(qname string, pageNum, pageSize int) {
[]string{"ID", "Type", "Payload"}, []string{"ID", "Type", "Payload"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload)) fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
} }
}, },
) )
@ -252,7 +255,7 @@ func listScheduledTasks(qname string, pageNum, pageSize int) {
[]string{"ID", "Type", "Payload", "Process In"}, []string{"ID", "Type", "Payload", "Process In"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt)) fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt))
} }
}, },
) )
@ -284,8 +287,8 @@ func listRetryTasks(qname string, pageNum, pageSize int) {
[]string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Last Failed", "Retried", "Max Retry"}, []string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Last Failed", "Retried", "Max Retry"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt), fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt),
t.LastErr, formatLastFailedAt(t.LastFailedAt), t.Retried, t.MaxRetry) t.LastErr, formatPastTime(t.LastFailedAt), t.Retried, t.MaxRetry)
} }
}, },
) )
@ -306,7 +309,27 @@ func listArchivedTasks(qname string, pageNum, pageSize int) {
[]string{"ID", "Type", "Payload", "Last Failed", "Last Error"}, []string{"ID", "Type", "Payload", "Last Failed", "Last Error"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatLastFailedAt(t.LastFailedAt), t.LastErr) fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.LastFailedAt), t.LastErr)
}
})
}
func listCompletedTasks(qname string, pageNum, pageSize int) {
i := createInspector()
tasks, err := i.ListCompletedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Printf("No completed tasks in %q queue\n", qname)
return
}
printTable(
[]string{"ID", "Type", "Payload", "CompletedAt", "Result"},
func(w io.Writer, tmpl string) {
for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.CompletedAt), sprintBytes(t.Result))
} }
}) })
} }
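The listing added above is a thin wrapper over the Inspector API; a short programmatic sketch of the same query, where the Redis address and queue name are placeholders:

// Sketch: list the first page of completed tasks in the "default" queue.
package main

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer inspector.Close()

	tasks, err := inspector.ListCompletedTasks("default", asynq.PageSize(30), asynq.Page(1))
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tasks {
		fmt.Printf("%s  %s  completed_at=%v  result=%q\n", t.ID, t.Type, t.CompletedAt, t.Result)
	}
}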
@ -356,7 +379,7 @@ func printTaskInfo(info *asynq.TaskInfo) {
if len(info.LastErr) != 0 { if len(info.LastErr) != 0 {
fmt.Println() fmt.Println()
bold.Println("Last Failure") bold.Println("Last Failure")
fmt.Printf("Failed at: %s\n", formatLastFailedAt(info.LastFailedAt)) fmt.Printf("Failed at: %s\n", formatPastTime(info.LastFailedAt))
fmt.Printf("Error message: %s\n", info.LastErr) fmt.Printf("Error message: %s\n", info.LastErr)
} }
} }
@ -371,11 +394,12 @@ func formatNextProcessAt(processAt time.Time) string {
return fmt.Sprintf("%s (in %v)", processAt.Format(time.UnixDate), processAt.Sub(time.Now()).Round(time.Second)) return fmt.Sprintf("%s (in %v)", processAt.Format(time.UnixDate), processAt.Sub(time.Now()).Round(time.Second))
} }
func formatLastFailedAt(lastFailedAt time.Time) string { // formatPastTime takes a time t in the past and returns a user-friendly string.
if lastFailedAt.IsZero() || lastFailedAt.Unix() == 0 { func formatPastTime(t time.Time) string {
if t.IsZero() || t.Unix() == 0 {
return "" return ""
} }
return lastFailedAt.Format(time.UnixDate) return t.Format(time.UnixDate)
} }
func taskArchive(cmd *cobra.Command, args []string) { func taskArchive(cmd *cobra.Command, args []string) {
@ -496,6 +520,8 @@ func taskDeleteAll(cmd *cobra.Command, args []string) {
n, err = i.DeleteAllRetryTasks(qname) n, err = i.DeleteAllRetryTasks(qname)
case "archived": case "archived":
n, err = i.DeleteAllArchivedTasks(qname) n, err = i.DeleteAllArchivedTasks(qname)
case "completed":
n, err = i.DeleteAllCompletedTasks(qname)
default: default:
fmt.Printf("error: unsupported state %q\n", state) fmt.Printf("error: unsupported state %q\n", state)
os.Exit(1) os.Exit(1)
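The new completed case above maps to Inspector.DeleteAllCompletedTasks; a brief sketch continuing from the inspector created in the previous example:

// Sketch: drop every retained (completed) task record in the "default" queue.
n, err := inspector.DeleteAllCompletedTasks("default")
if err != nil {
	log.Fatal(err)
}
fmt.Printf("deleted %d completed tasks\n", n)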

View File

@ -4,14 +4,15 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"strings"
"testing"
"time"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq" "github.com/hibiken/asynq"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
asynqcontext "github.com/hibiken/asynq/internal/context" asynqcontext "github.com/hibiken/asynq/internal/context"
"strings"
"testing"
"time"
) )
var ( var (
@ -80,16 +81,16 @@ func TestNewSemaphore_Acquire(t *testing.T) {
desc string desc string
name string name string
maxConcurrency int maxConcurrency int
taskIDs []uuid.UUID taskIDs []string
ctxFunc func(uuid.UUID) (context.Context, context.CancelFunc) ctxFunc func(string) (context.Context, context.CancelFunc)
want []bool want []bool
}{ }{
{ {
desc: "Should acquire token when current token count is less than maxTokens", desc: "Should acquire token when current token count is less than maxTokens",
name: "task-1", name: "task-1",
maxConcurrency: 3, maxConcurrency: 3,
taskIDs: []uuid.UUID{uuid.New(), uuid.New()}, taskIDs: []string{uuid.NewString(), uuid.NewString()},
ctxFunc: func(id uuid.UUID) (context.Context, context.CancelFunc) { ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{ return asynqcontext.New(&base.TaskMessage{
ID: id, ID: id,
Queue: "task-1", Queue: "task-1",
@ -101,8 +102,8 @@ func TestNewSemaphore_Acquire(t *testing.T) {
desc: "Should fail acquiring token when current token count is equal to maxTokens", desc: "Should fail acquiring token when current token count is equal to maxTokens",
name: "task-2", name: "task-2",
maxConcurrency: 3, maxConcurrency: 3,
taskIDs: []uuid.UUID{uuid.New(), uuid.New(), uuid.New(), uuid.New()}, taskIDs: []string{uuid.NewString(), uuid.NewString(), uuid.NewString(), uuid.NewString()},
ctxFunc: func(id uuid.UUID) (context.Context, context.CancelFunc) { ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{ return asynqcontext.New(&base.TaskMessage{
ID: id, ID: id,
Queue: "task-2", Queue: "task-2",
@ -148,16 +149,16 @@ func TestNewSemaphore_Acquire_Error(t *testing.T) {
desc string desc string
name string name string
maxConcurrency int maxConcurrency int
taskIDs []uuid.UUID taskIDs []string
ctxFunc func(uuid.UUID) (context.Context, context.CancelFunc) ctxFunc func(string) (context.Context, context.CancelFunc)
errStr string errStr string
}{ }{
{ {
desc: "Should return error if context has no deadline", desc: "Should return error if context has no deadline",
name: "task-3", name: "task-3",
maxConcurrency: 1, maxConcurrency: 1,
taskIDs: []uuid.UUID{uuid.New(), uuid.New()}, taskIDs: []string{uuid.NewString(), uuid.NewString()},
ctxFunc: func(id uuid.UUID) (context.Context, context.CancelFunc) { ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return context.Background(), func() {} return context.Background(), func() {}
}, },
errStr: "provided context must have a deadline", errStr: "provided context must have a deadline",
@ -166,8 +167,8 @@ func TestNewSemaphore_Acquire_Error(t *testing.T) {
desc: "Should return error when context is missing taskID", desc: "Should return error when context is missing taskID",
name: "task-4", name: "task-4",
maxConcurrency: 1, maxConcurrency: 1,
taskIDs: []uuid.UUID{uuid.New()}, taskIDs: []string{uuid.NewString()},
ctxFunc: func(_ uuid.UUID) (context.Context, context.CancelFunc) { ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), time.Second) return context.WithTimeout(context.Background(), time.Second)
}, },
errStr: "provided context is missing task ID value", errStr: "provided context is missing task ID value",
@ -206,13 +207,13 @@ func TestNewSemaphore_Acquire_StaleToken(t *testing.T) {
rc := opt.MakeRedisClient().(redis.UniversalClient) rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close() defer rc.Close()
taskID := uuid.New() taskID := uuid.NewString()
// adding a set member to mimic the case where token is acquired but the goroutine crashed, // adding a set member to mimic the case where token is acquired but the goroutine crashed,
// in which case, the token will not be explicitly removed and should be present already // in which case, the token will not be explicitly removed and should be present already
rc.ZAdd(context.Background(), semaphoreKey("stale-token"), &redis.Z{ rc.ZAdd(context.Background(), semaphoreKey("stale-token"), &redis.Z{
Score: float64(time.Now().Add(-10 * time.Second).Unix()), Score: float64(time.Now().Add(-10 * time.Second).Unix()),
Member: taskID.String(), Member: taskID,
}) })
sema := NewSemaphore(opt, "stale-token", 1) sema := NewSemaphore(opt, "stale-token", 1)
@ -238,15 +239,15 @@ func TestNewSemaphore_Release(t *testing.T) {
tests := []struct { tests := []struct {
desc string desc string
name string name string
taskIDs []uuid.UUID taskIDs []string
ctxFunc func(uuid.UUID) (context.Context, context.CancelFunc) ctxFunc func(string) (context.Context, context.CancelFunc)
wantCount int64 wantCount int64
}{ }{
{ {
desc: "Should decrease token count", desc: "Should decrease token count",
name: "task-5", name: "task-5",
taskIDs: []uuid.UUID{uuid.New()}, taskIDs: []string{uuid.NewString()},
ctxFunc: func(id uuid.UUID) (context.Context, context.CancelFunc) { ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{ return asynqcontext.New(&base.TaskMessage{
ID: id, ID: id,
Queue: "task-3", Queue: "task-3",
@ -256,8 +257,8 @@ func TestNewSemaphore_Release(t *testing.T) {
{ {
desc: "Should decrease token count by 2", desc: "Should decrease token count by 2",
name: "task-6", name: "task-6",
taskIDs: []uuid.UUID{uuid.New(), uuid.New()}, taskIDs: []string{uuid.NewString(), uuid.NewString()},
ctxFunc: func(id uuid.UUID) (context.Context, context.CancelFunc) { ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{ return asynqcontext.New(&base.TaskMessage{
ID: id, ID: id,
Queue: "task-4", Queue: "task-4",
@ -280,7 +281,7 @@ func TestNewSemaphore_Release(t *testing.T) {
for i := 0; i < len(tt.taskIDs); i++ { for i := 0; i < len(tt.taskIDs); i++ {
members = append(members, &redis.Z{ members = append(members, &redis.Z{
Score: float64(time.Now().Add(time.Duration(i) * time.Second).Unix()), Score: float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
Member: tt.taskIDs[i].String(), Member: tt.taskIDs[i],
}) })
} }
if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil { if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {
@ -313,20 +314,20 @@ func TestNewSemaphore_Release(t *testing.T) {
} }
func TestNewSemaphore_Release_Error(t *testing.T) { func TestNewSemaphore_Release_Error(t *testing.T) {
testID := uuid.New() testID := uuid.NewString()
tests := []struct { tests := []struct {
desc string desc string
name string name string
taskIDs []uuid.UUID taskIDs []string
ctxFunc func(uuid.UUID) (context.Context, context.CancelFunc) ctxFunc func(string) (context.Context, context.CancelFunc)
errStr string errStr string
}{ }{
{ {
desc: "Should return error when context is missing taskID", desc: "Should return error when context is missing taskID",
name: "task-7", name: "task-7",
taskIDs: []uuid.UUID{uuid.New()}, taskIDs: []string{uuid.NewString()},
ctxFunc: func(_ uuid.UUID) (context.Context, context.CancelFunc) { ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), time.Second) return context.WithTimeout(context.Background(), time.Second)
}, },
errStr: "provided context is missing task ID value", errStr: "provided context is missing task ID value",
@ -334,14 +335,14 @@ func TestNewSemaphore_Release_Error(t *testing.T) {
{ {
desc: "Should return error when context has taskID which never acquired token", desc: "Should return error when context has taskID which never acquired token",
name: "task-8", name: "task-8",
taskIDs: []uuid.UUID{uuid.New()}, taskIDs: []string{uuid.NewString()},
ctxFunc: func(_ uuid.UUID) (context.Context, context.CancelFunc) { ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{ return asynqcontext.New(&base.TaskMessage{
ID: testID, ID: testID,
Queue: "task-4", Queue: "task-4",
}, time.Now().Add(time.Second)) }, time.Now().Add(time.Second))
}, },
errStr: fmt.Sprintf("no token found for task %q", testID.String()), errStr: fmt.Sprintf("no token found for task %q", testID),
}, },
} }
@ -359,7 +360,7 @@ func TestNewSemaphore_Release_Error(t *testing.T) {
for i := 0; i < len(tt.taskIDs); i++ { for i := 0; i < len(tt.taskIDs); i++ {
members = append(members, &redis.Z{ members = append(members, &redis.Z{
Score: float64(time.Now().Add(time.Duration(i) * time.Second).Unix()), Score: float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
Member: tt.taskIDs[i].String(), Member: tt.taskIDs[i],
}) })
} }
if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil { if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {