Mirror of https://github.com/hibiken/asynq.git (synced 2025-09-17 20:30:06 +08:00)
# Compare commits

27 commits:

- 970cb7a606
- 157e97e72e
- 22e6c9d297
- 99a6750656
- e7c1c3ad6f
- c9183374c5
- 6e7106c8f2
- 9f2c321e98
- e2b61c9056
- 531d1ef089
- 413afc2ab6
- 6bb4818509
- f4ddac4dcc
- 4638405cbd
- 9e2f88c00d
- dbdd9c6d5f
- 2261c7c9a0
- 83cae4bb24
- 23c522dc9f
- 0d2c0f612b
- d612a8a9e4
- b3ef9e91a9
- 05534c6f24
- f0db219f6a
- 3ae0e7f528
- 421dc584ff
- cfd1a1dfe8
## .github/FUNDING.yml (vendored, new file, 12 lines)

```diff
@@ -0,0 +1,12 @@
+# These are supported funding model platforms
+
+github: [hibiken] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
```
## .github/workflows/build.yml (vendored, 2 changed lines)

```diff
@@ -7,7 +7,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        go-version: [1.13.x, 1.14.x, 1.15.x, 1.16.x]
+        go-version: [1.14.x, 1.15.x, 1.16.x, 1.17.x]
     runs-on: ${{ matrix.os }}
     services:
       redis:
```
## .gitignore (vendored, 3 changed lines)

```diff
@@ -21,4 +21,5 @@
 .asynq.*
 
 # Ignore editor config files
 .vscode
+.idea
```
## CHANGELOG.md (54 changed lines)

```diff
@@ -7,13 +7,61 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
-## [0.18.3] - 2020-08-09
+## [0.19.1] - 2021-12-12
+
+### Added
+
+- `Latency` field is added to `QueueInfo`.
+- `EnqueueContext` method is added to `Client`.
+
+### Fixed
+
+- Fixed an error when user pass a duration less than 1s to `Unique` option
+
+## [0.19.0] - 2021-11-06
+
+### Changed
+
+- `NewTask` takes `Option` as variadic argument
+- Bumped minimum supported go version to 1.14 (i.e. go1.14 or higher is required).
+
+### Added
+
+- `Retention` option is added to allow user to specify task retention duration after completion.
+- `TaskID` option is added to allow user to specify task ID.
+- `ErrTaskIDConflict` sentinel error value is added.
+- `ResultWriter` type is added and provided through `Task.ResultWriter` method.
+- `TaskInfo` has new fields `CompletedAt`, `Result` and `Retention`.
+
+### Removed
+
+- `Client.SetDefaultOptions` is removed. Use `NewTask` instead to pass default options for tasks.
+
+## [0.18.6] - 2021-10-03
+
+### Changed
+
+- Updated `github.com/go-redis/redis` package to v8
+
+## [0.18.5] - 2021-09-01
+
+### Added
+
+- `IsFailure` config option is added to determine whether error returned from Handler counts as a failure.
+
+## [0.18.4] - 2021-08-17
+
+### Fixed
+
+- Scheduler methods are now thread-safe. It's now safe to call `Register` and `Unregister` concurrently.
+
+## [0.18.3] - 2021-08-09
 
 ### Changed
 
 - `Client.Enqueue` no longer enqueues tasks with empty typename; Error message is returned.
 
-## [0.18.2] - 2020-07-15
+## [0.18.2] - 2021-07-15
 
 ### Changed
 
@@ -24,7 +72,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 - Fixed latency issue around memory usage (see https://github.com/hibiken/asynq/issues/309).
 
-## [0.18.1] - 2020-07-04
+## [0.18.1] - 2021-07-04
 
 ### Changed
 
```
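Taken together, the v0.19 entries above change how options flow through the API. For orientation, a minimal sketch of that flow; the task type, payload, and option values below are made up for illustration and are not from the diff:

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})
	defer client.Close()

	// Default options now travel with the task itself
	// (Client.SetDefaultOptions was removed in v0.19.0).
	task := asynq.NewTask("email:welcome", []byte(`{"user_id": 42}`),
		asynq.MaxRetry(5),
		asynq.Retention(24*time.Hour), // keep the completed task around for a day
	)

	// Options passed at enqueue time override the ones given to NewTask.
	info, err := client.Enqueue(task, asynq.MaxRetry(10))
	if err != nil {
		log.Fatalf("could not enqueue: %v", err)
	}
	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
}
```

This precedence (enqueue-time options win over NewTask options) is the same behavior the README change below demonstrates.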
## README.md (22 changed lines)

```diff
@@ -49,7 +49,7 @@ Task queues are used as a mechanism to distribute work across multiple machines.
 
 ## Quickstart
 
-Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.13` or higher is required.
+Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.14` or higher is required.
 
 Initialize your project by creating a folder and then running `go mod init github.com/your/repo` ([learn more](https://blog.golang.org/using-go-modules)) inside the folder. Then install Asynq library with the [`go get`](https://golang.org/cmd/go/#hdr-Add_dependencies_to_current_module_and_install_them) command:
 
@@ -103,7 +103,8 @@ func NewImageResizeTask(src string) (*asynq.Task, error) {
 	if err != nil {
 		return nil, err
 	}
-	return asynq.NewTask(TypeImageResize, payload), nil
+	// task options can be passed to NewTask, which can be overridden at enqueue time.
+	return asynq.NewTask(TypeImageResize, payload, asynq.MaxRetry(5), asynq.Timeout(20 * time.Minute)), nil
 }
 
 //---------------------------------------------------------------
@@ -196,28 +197,15 @@ func main() {
 	// Options include MaxRetry, Queue, Timeout, Deadline, Unique etc.
 	// ----------------------------------------------------------------------------
 
-	client.SetDefaultOptions(tasks.TypeImageResize, asynq.MaxRetry(10), asynq.Timeout(3*time.Minute))
-
 	task, err = tasks.NewImageResizeTask("https://example.com/myassets/image.jpg")
 	if err != nil {
 		log.Fatalf("could not create task: %v", err)
 	}
-	info, err = client.Enqueue(task)
+	info, err = client.Enqueue(task, asynq.MaxRetry(10), asynq.Timeout(3 * time.Minute))
 	if err != nil {
 		log.Fatalf("could not enqueue task: %v", err)
 	}
 	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
-
-	// ---------------------------------------------------------------------------
-	// Example 4: Pass options to tune task processing behavior at enqueue time.
-	// Options passed at enqueue time override default ones.
-	// ---------------------------------------------------------------------------
-
-	info, err = client.Enqueue(task, asynq.Queue("critical"), asynq.Timeout(30*time.Second))
-	if err != nil {
-		log.Fatal("could not enqueue task: %v", err)
-	}
-	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 }
@@ -239,7 +227,7 @@ const redisAddr = "127.0.0.1:6379"
 
 func main() {
 	srv := asynq.NewServer(
-		asynq.RedisClientOpt{Addr: redisAddr}
+		asynq.RedisClientOpt{Addr: redisAddr},
 		asynq.Config{
 			// Specify how many concurrent workers to use
 			Concurrency: 10,
```
## asynq.go (99 changed lines)

```diff
@@ -5,6 +5,7 @@
 package asynq
 
 import (
+	"context"
 	"crypto/tls"
 	"fmt"
 	"net/url"
@@ -12,7 +13,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
 	"github.com/hibiken/asynq/internal/base"
 )
 
@@ -23,16 +24,39 @@ type Task struct {
 
 	// payload holds data needed to perform the task.
 	payload []byte
+
+	// opts holds options for the task.
+	opts []Option
+
+	// w is the ResultWriter for the task.
+	w *ResultWriter
 }
 
 func (t *Task) Type() string    { return t.typename }
 func (t *Task) Payload() []byte { return t.payload }
 
+// ResultWriter returns a pointer to the ResultWriter associated with the task.
+//
+// Nil pointer is returned if called on a newly created task (i.e. task created by calling NewTask).
+// Only the tasks passed to Handler.ProcessTask have a valid ResultWriter pointer.
+func (t *Task) ResultWriter() *ResultWriter { return t.w }
+
 // NewTask returns a new Task given a type name and payload data.
-func NewTask(typename string, payload []byte) *Task {
+// Options can be passed to configure task processing behavior.
+func NewTask(typename string, payload []byte, opts ...Option) *Task {
 	return &Task{
 		typename: typename,
 		payload:  payload,
+		opts:     opts,
+	}
+}
+
+// newTask creates a task with the given typename, payload and ResultWriter.
+func newTask(typename string, payload []byte, w *ResultWriter) *Task {
+	return &Task{
+		typename: typename,
+		payload:  payload,
+		w:        w,
 	}
 }
@@ -76,11 +100,31 @@ type TaskInfo struct {
 	// NextProcessAt is the time the task is scheduled to be processed,
 	// zero if not applicable.
 	NextProcessAt time.Time
+
+	// Retention is duration of the retention period after the task is successfully processed.
+	Retention time.Duration
+
+	// CompletedAt is the time when the task is processed successfully.
+	// Zero value (i.e. time.Time{}) indicates no value.
+	CompletedAt time.Time
+
+	// Result holds the result data associated with the task.
+	// Use ResultWriter to write result data from the Handler.
+	Result []byte
 }
 
-func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time) *TaskInfo {
+// If t is non-zero, returns time converted from t as unix time in seconds.
+// If t is zero, returns zero value of time.Time.
+func fromUnixTimeOrZero(t int64) time.Time {
+	if t == 0 {
+		return time.Time{}
+	}
+	return time.Unix(t, 0)
+}
+
+func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time, result []byte) *TaskInfo {
 	info := TaskInfo{
-		ID:            msg.ID.String(),
+		ID:            msg.ID,
 		Queue:         msg.Queue,
 		Type:          msg.Type,
 		Payload:       msg.Payload, // Do we need to make a copy?
@@ -88,18 +132,12 @@ func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time
 		Retried:       msg.Retried,
 		LastErr:       msg.ErrorMsg,
 		Timeout:       time.Duration(msg.Timeout) * time.Second,
+		Deadline:      fromUnixTimeOrZero(msg.Deadline),
+		Retention:     time.Duration(msg.Retention) * time.Second,
 		NextProcessAt: nextProcessAt,
-	}
-	if msg.LastFailedAt == 0 {
-		info.LastFailedAt = time.Time{}
-	} else {
-		info.LastFailedAt = time.Unix(msg.LastFailedAt, 0)
-	}
-
-	if msg.Deadline == 0 {
-		info.Deadline = time.Time{}
-	} else {
-		info.Deadline = time.Unix(msg.Deadline, 0)
+		LastFailedAt:  fromUnixTimeOrZero(msg.LastFailedAt),
+		CompletedAt:   fromUnixTimeOrZero(msg.CompletedAt),
+		Result:        result,
 	}
 
 	switch state {
@@ -113,6 +151,8 @@ func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time
 		info.State = TaskStateRetry
 	case base.TaskStateArchived:
 		info.State = TaskStateArchived
+	case base.TaskStateCompleted:
+		info.State = TaskStateCompleted
 	default:
 		panic(fmt.Sprintf("internal error: unknown state: %d", state))
 	}
@@ -137,6 +177,9 @@ const (
 
 	// Indicates that the task is archived and stored for inspection purposes.
 	TaskStateArchived
+
+	// Indicates that the task is processed successfully and retained until the retention TTL expires.
+	TaskStateCompleted
 )
 
 func (s TaskState) String() string {
@@ -151,6 +194,8 @@ func (s TaskState) String() string {
 		return "retry"
 	case TaskStateArchived:
 		return "archived"
+	case TaskStateCompleted:
+		return "completed"
 	}
 	panic("asynq: unknown task state")
 }
@@ -435,3 +480,27 @@ func parseRedisSentinelURI(u *url.URL) (RedisConnOpt, error) {
 	}
 	return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, Password: password}, nil
 }
+
+// ResultWriter is a client interface to write result data for a task.
+// It writes the data to the redis instance the server is connected to.
+type ResultWriter struct {
+	id     string // task ID this writer is responsible for
+	qname  string // queue name the task belongs to
+	broker base.Broker
+	ctx    context.Context // context associated with the task
+}
+
+// Write writes the given data as a result of the task the ResultWriter is associated with.
+func (w *ResultWriter) Write(data []byte) (n int, err error) {
+	select {
+	case <-w.ctx.Done():
+		return 0, fmt.Errorf("failed to result task result: %v", w.ctx.Err())
+	default:
+	}
+	return w.broker.WriteResult(w.qname, w.id, data)
+}
+
+// TaskID returns the ID of the task the ResultWriter is associated with.
+func (w *ResultWriter) TaskID() string {
+	return w.id
+}
```
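The `ResultWriter` added above is only usable from inside a handler, since only tasks passed to `Handler.ProcessTask` carry a valid pointer. A minimal sketch of recording result data so that it later surfaces in `TaskInfo.Result`; the handler name, task type, and result payload here are hypothetical:

```go
package tasks

import (
	"context"
	"fmt"

	"github.com/hibiken/asynq"
)

// handleImageResize sketches a handler that records its output via the
// task's ResultWriter after doing the actual work.
func handleImageResize(ctx context.Context, t *asynq.Task) error {
	// ... perform the resize work here ...
	result := []byte(`{"thumbnail_url": "https://example.com/thumb.jpg"}`) // hypothetical result

	// Tasks handed to Handler.ProcessTask carry a non-nil ResultWriter.
	if _, err := t.ResultWriter().Write(result); err != nil {
		return fmt.Errorf("failed to write task result: %v", err)
	}
	return nil
}
```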
|
@@ -10,7 +10,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-redis/redis/v7"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
h "github.com/hibiken/asynq/internal/asynqtest"
|
||||
"github.com/hibiken/asynq/internal/log"
|
||||
|
## client.go (131 changed lines)

```diff
@@ -5,12 +5,12 @@
 package asynq
 
 import (
+	"context"
 	"fmt"
 	"strings"
-	"sync"
 	"time"
 
-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
 	"github.com/google/uuid"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/errors"
@@ -24,9 +24,7 @@ import (
 //
 // Clients are safe for concurrent use by multiple goroutines.
 type Client struct {
-	mu   sync.Mutex
-	opts map[string][]Option
-	rdb  *rdb.RDB
+	rdb *rdb.RDB
 }
 
 // NewClient returns a new Client instance given a redis connection option.
@@ -35,11 +33,7 @@ func NewClient(r RedisConnOpt) *Client {
 	if !ok {
 		panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
 	}
-	rdb := rdb.NewRDB(c)
-	return &Client{
-		opts: make(map[string][]Option),
-		rdb:  rdb,
-	}
+	return &Client{rdb: rdb.NewRDB(c)}
 }
 
 type OptionType int
@@ -52,6 +46,8 @@ const (
 	UniqueOpt
 	ProcessAtOpt
 	ProcessInOpt
+	TaskIDOpt
+	RetentionOpt
 )
 
 // Option specifies the task processing behavior.
@@ -70,11 +66,13 @@ type Option interface {
 type (
 	retryOption     int
 	queueOption     string
+	taskIDOption    string
 	timeoutOption   time.Duration
 	deadlineOption  time.Time
 	uniqueOption    time.Duration
 	processAtOption time.Time
 	processInOption time.Duration
+	retentionOption time.Duration
 )
 
 // MaxRetry returns an option to specify the max number of times
@@ -101,6 +99,15 @@ func (qname queueOption) String() string { return fmt.Sprintf("Queue(%q)", s
 func (qname queueOption) Type() OptionType   { return QueueOpt }
 func (qname queueOption) Value() interface{} { return string(qname) }
 
+// TaskID returns an option to specify the task ID.
+func TaskID(id string) Option {
+	return taskIDOption(id)
+}
+
+func (id taskIDOption) String() string     { return fmt.Sprintf("TaskID(%q)", string(id)) }
+func (id taskIDOption) Type() OptionType   { return TaskIDOpt }
+func (id taskIDOption) Value() interface{} { return string(id) }
+
 // Timeout returns an option to specify how long a task may run.
 // If the timeout elapses before the Handler returns, then the task
 // will be retried.
@@ -137,6 +144,7 @@ func (t deadlineOption) Value() interface{} { return time.Time(t) }
 // Task enqueued with this option is guaranteed to be unique within the given ttl.
 // Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued.
 // ErrDuplicateTask error is returned when enqueueing a duplicate task.
+// TTL duration must be greater than or equal to 1 second.
 //
 // Uniqueness of a task is based on the following properties:
 // - Task Type
@@ -174,18 +182,36 @@ func (d processInOption) String() string { return fmt.Sprintf("ProcessIn(%v)
 func (d processInOption) Type() OptionType   { return ProcessInOpt }
 func (d processInOption) Value() interface{} { return time.Duration(d) }
 
+// Retention returns an option to specify the duration of retention period for the task.
+// If this option is provided, the task will be stored as a completed task after successful processing.
+// A completed task will be deleted after the specified duration elapses.
+func Retention(d time.Duration) Option {
+	return retentionOption(d)
+}
+
+func (ttl retentionOption) String() string     { return fmt.Sprintf("Retention(%v)", time.Duration(ttl)) }
+func (ttl retentionOption) Type() OptionType   { return RetentionOpt }
+func (ttl retentionOption) Value() interface{} { return time.Duration(ttl) }
+
 // ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
 //
 // ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
 var ErrDuplicateTask = errors.New("task already exists")
 
+// ErrTaskIDConflict indicates that the given task could not be enqueued since its task ID already exists.
+//
+// ErrTaskIDConflict error only applies to tasks enqueued with a TaskID option.
+var ErrTaskIDConflict = errors.New("task ID conflicts with another task")
+
 type option struct {
 	retry     int
 	queue     string
+	taskID    string
 	timeout   time.Duration
 	deadline  time.Time
 	uniqueTTL time.Duration
 	processAt time.Time
+	retention time.Duration
 }
 
 // composeOptions merges user provided options into the default options
@@ -196,6 +222,7 @@ func composeOptions(opts ...Option) (option, error) {
 	res := option{
 		retry:     defaultMaxRetry,
 		queue:     base.DefaultQueueName,
+		taskID:    uuid.NewString(),
 		timeout:   0, // do not set to deafultTimeout here
 		deadline:  time.Time{},
 		processAt: time.Now(),
@@ -210,16 +237,28 @@ func composeOptions(opts ...Option) (option, error) {
 				return option{}, err
 			}
 			res.queue = qname
+		case taskIDOption:
+			id := string(opt)
+			if err := validateTaskID(id); err != nil {
+				return option{}, err
+			}
+			res.taskID = id
 		case timeoutOption:
 			res.timeout = time.Duration(opt)
 		case deadlineOption:
 			res.deadline = time.Time(opt)
 		case uniqueOption:
-			res.uniqueTTL = time.Duration(opt)
+			ttl := time.Duration(opt)
+			if ttl < 1*time.Second {
+				return option{}, errors.New("Unique TTL cannot be less than 1s")
+			}
+			res.uniqueTTL = ttl
 		case processAtOption:
 			res.processAt = time.Time(opt)
 		case processInOption:
 			res.processAt = time.Now().Add(time.Duration(opt))
+		case retentionOption:
+			res.retention = time.Duration(opt)
 		default:
 			// ignore unexpected option
 		}
@@ -227,6 +266,14 @@ func composeOptions(opts ...Option) (option, error) {
 	return res, nil
 }
 
+// validates user provided task ID string.
+func validateTaskID(id string) error {
+	if strings.TrimSpace(id) == "" {
+		return errors.New("task ID cannot be empty")
+	}
+	return nil
+}
+
 const (
 	// Default max retry count used if nothing is specified.
 	defaultMaxRetry = 25
@@ -241,40 +288,45 @@ var (
 	noDeadline time.Time = time.Unix(0, 0)
 )
 
-// SetDefaultOptions sets options to be used for a given task type.
-// The argument opts specifies the behavior of task processing.
-// If there are conflicting Option values the last one overrides others.
-//
-// Default options can be overridden by options passed at enqueue time.
-func (c *Client) SetDefaultOptions(taskType string, opts ...Option) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	c.opts[taskType] = opts
-}
-
 // Close closes the connection with redis.
 func (c *Client) Close() error {
 	return c.rdb.Close()
 }
 
-// Enqueue enqueues the given task to be processed asynchronously.
+// Enqueue enqueues the given task to a queue.
 //
 // Enqueue returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
 //
 // The argument opts specifies the behavior of task processing.
 // If there are conflicting Option values the last one overrides others.
+// Any options provided to NewTask can be overridden by options passed to Enqueue.
 // By deafult, max retry is set to 25 and timeout is set to 30 minutes.
 //
 // If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
+//
+// Enqueue uses context.Background internally; to specify the context, use EnqueueContext.
 func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
+	return c.EnqueueContext(context.Background(), task, opts...)
+}
+
+// EnqueueContext enqueues the given task to a queue.
+//
+// EnqueueContext returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
+//
+// The argument opts specifies the behavior of task processing.
+// If there are conflicting Option values the last one overrides others.
+// Any options provided to NewTask can be overridden by options passed to Enqueue.
+// By deafult, max retry is set to 25 and timeout is set to 30 minutes.
+//
+// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
+//
+// The first argument context applies to the enqueue operation. To specify task timeout and deadline, use Timeout and Deadline option instead.
+func (c *Client) EnqueueContext(ctx context.Context, task *Task, opts ...Option) (*TaskInfo, error) {
 	if strings.TrimSpace(task.Type()) == "" {
 		return nil, fmt.Errorf("task typename cannot be empty")
 	}
-	c.mu.Lock()
-	if defaults, ok := c.opts[task.Type()]; ok {
-		opts = append(defaults, opts...)
-	}
-	c.mu.Unlock()
+	// merge task options with the options provided at enqueue time.
+	opts = append(task.opts, opts...)
 	opt, err := composeOptions(opts...)
 	if err != nil {
 		return nil, err
@@ -296,7 +348,7 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
 		uniqueKey = base.UniqueKey(opt.queue, task.Type(), task.Payload())
 	}
 	msg := &base.TaskMessage{
-		ID:        uuid.New(),
+		ID:        opt.taskID,
 		Type:      task.Type(),
 		Payload:   task.Payload(),
 		Queue:     opt.queue,
@@ -304,37 +356,40 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
 		Deadline:  deadline.Unix(),
 		Timeout:   int64(timeout.Seconds()),
 		UniqueKey: uniqueKey,
+		Retention: int64(opt.retention.Seconds()),
 	}
 	now := time.Now()
 	var state base.TaskState
 	if opt.processAt.Before(now) || opt.processAt.Equal(now) {
 		opt.processAt = now
-		err = c.enqueue(msg, opt.uniqueTTL)
+		err = c.enqueue(ctx, msg, opt.uniqueTTL)
 		state = base.TaskStatePending
 	} else {
-		err = c.schedule(msg, opt.processAt, opt.uniqueTTL)
+		err = c.schedule(ctx, msg, opt.processAt, opt.uniqueTTL)
 		state = base.TaskStateScheduled
 	}
 	switch {
 	case errors.Is(err, errors.ErrDuplicateTask):
 		return nil, fmt.Errorf("%w", ErrDuplicateTask)
+	case errors.Is(err, errors.ErrTaskIdConflict):
+		return nil, fmt.Errorf("%w", ErrTaskIDConflict)
 	case err != nil:
 		return nil, err
 	}
-	return newTaskInfo(msg, state, opt.processAt), nil
+	return newTaskInfo(msg, state, opt.processAt, nil), nil
 }
 
-func (c *Client) enqueue(msg *base.TaskMessage, uniqueTTL time.Duration) error {
+func (c *Client) enqueue(ctx context.Context, msg *base.TaskMessage, uniqueTTL time.Duration) error {
 	if uniqueTTL > 0 {
-		return c.rdb.EnqueueUnique(msg, uniqueTTL)
+		return c.rdb.EnqueueUnique(ctx, msg, uniqueTTL)
 	}
-	return c.rdb.Enqueue(msg)
+	return c.rdb.Enqueue(ctx, msg)
 }
 
-func (c *Client) schedule(msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
+func (c *Client) schedule(ctx context.Context, msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
 	if uniqueTTL > 0 {
 		ttl := t.Add(uniqueTTL).Sub(time.Now())
-		return c.rdb.ScheduleUnique(msg, t, ttl)
+		return c.rdb.ScheduleUnique(ctx, msg, t, ttl)
 	}
-	return c.rdb.Schedule(msg, t)
+	return c.rdb.Schedule(ctx, msg, t)
 }
```
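A short sketch of how the new sentinel error and the context-aware enqueue path fit together; the task type and ID below are made up, and the conflict behavior mirrors `TestClientEnqueueWithConflictingTaskID` in the test diff that follows:

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})
	defer client.Close()

	task := asynq.NewTask("report:generate", nil)

	// The context bounds the enqueue operation itself; task timeout and
	// deadline are still set via the Timeout and Deadline options.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	if _, err := client.EnqueueContext(ctx, task, asynq.TaskID("report-2021-12")); err != nil {
		log.Fatalf("first enqueue failed: %v", err)
	}

	// A second enqueue with the same TaskID is rejected with ErrTaskIDConflict.
	_, err := client.EnqueueContext(ctx, task, asynq.TaskID("report-2021-12"))
	switch {
	case errors.Is(err, asynq.ErrTaskIDConflict):
		log.Print("duplicate task ID, skipping")
	case err != nil:
		log.Fatalf("enqueue failed: %v", err)
	}
}
```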
## client_test.go (170 changed lines)

```diff
@@ -5,6 +5,7 @@
 package asynq
 
 import (
+	"context"
 	"errors"
 	"testing"
 	"time"
@@ -415,6 +416,40 @@ func TestClientEnqueue(t *testing.T) {
 				},
 			},
 		},
+		{
+			desc: "With Retention option",
+			task: task,
+			opts: []Option{
+				Retention(24 * time.Hour),
+			},
+			wantInfo: &TaskInfo{
+				Queue:         "default",
+				Type:          task.Type(),
+				Payload:       task.Payload(),
+				State:         TaskStatePending,
+				MaxRetry:      defaultMaxRetry,
+				Retried:       0,
+				LastErr:       "",
+				LastFailedAt:  time.Time{},
+				Timeout:       defaultTimeout,
+				Deadline:      time.Time{},
+				NextProcessAt: now,
+				Retention:     24 * time.Hour,
+			},
+			wantPending: map[string][]*base.TaskMessage{
+				"default": {
+					{
+						Type:      task.Type(),
+						Payload:   task.Payload(),
+						Retry:     defaultMaxRetry,
+						Queue:     "default",
+						Timeout:   int64(defaultTimeout.Seconds()),
+						Deadline:  noDeadline.Unix(),
+						Retention: int64((24 * time.Hour).Seconds()),
+					},
+				},
+			},
+		},
 	}
 
 	for _, tc := range tests {
@@ -443,6 +478,100 @@ func TestClientEnqueue(t *testing.T) {
 	}
 }
 
+func TestClientEnqueueWithTaskIDOption(t *testing.T) {
+	r := setup(t)
+	client := NewClient(getRedisConnOpt(t))
+	defer client.Close()
+
+	task := NewTask("send_email", nil)
+	now := time.Now()
+
+	tests := []struct {
+		desc        string
+		task        *Task
+		opts        []Option
+		wantInfo    *TaskInfo
+		wantPending map[string][]*base.TaskMessage
+	}{
+		{
+			desc: "With a valid TaskID option",
+			task: task,
+			opts: []Option{
+				TaskID("custom_id"),
+			},
+			wantInfo: &TaskInfo{
+				ID:            "custom_id",
+				Queue:         "default",
+				Type:          task.Type(),
+				Payload:       task.Payload(),
+				State:         TaskStatePending,
+				MaxRetry:      defaultMaxRetry,
+				Retried:       0,
+				LastErr:       "",
+				LastFailedAt:  time.Time{},
+				Timeout:       defaultTimeout,
+				Deadline:      time.Time{},
+				NextProcessAt: now,
+			},
+			wantPending: map[string][]*base.TaskMessage{
+				"default": {
+					{
+						ID:       "custom_id",
+						Type:     task.Type(),
+						Payload:  task.Payload(),
+						Retry:    defaultMaxRetry,
+						Queue:    "default",
+						Timeout:  int64(defaultTimeout.Seconds()),
+						Deadline: noDeadline.Unix(),
+					},
+				},
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		h.FlushDB(t, r) // clean up db before each test case.
+
+		gotInfo, err := client.Enqueue(tc.task, tc.opts...)
+		if err != nil {
+			t.Errorf("got non-nil error %v, want nil", err)
+			continue
+		}
+
+		cmpOptions := []cmp.Option{
+			cmpopts.EquateApproxTime(500 * time.Millisecond),
+		}
+		if diff := cmp.Diff(tc.wantInfo, gotInfo, cmpOptions...); diff != "" {
+			t.Errorf("%s;\nEnqueue(task) returned %v, want %v; (-want,+got)\n%s",
+				tc.desc, gotInfo, tc.wantInfo, diff)
+		}
+
+		for qname, want := range tc.wantPending {
+			got := h.GetPendingMessages(t, r, qname)
+			if diff := cmp.Diff(want, got); diff != "" {
+				t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
+			}
+		}
+	}
+}
+
+func TestClientEnqueueWithConflictingTaskID(t *testing.T) {
+	setup(t)
+	client := NewClient(getRedisConnOpt(t))
+	defer client.Close()
+
+	const taskID = "custom_id"
+	task := NewTask("foo", nil)
+
+	if _, err := client.Enqueue(task, TaskID(taskID)); err != nil {
+		t.Fatalf("First task: Enqueue failed: %v", err)
+	}
+	_, err := client.Enqueue(task, TaskID(taskID))
+	if !errors.Is(err, ErrTaskIDConflict) {
+		t.Errorf("Second task: Enqueue returned %v, want %v", err, ErrTaskIDConflict)
+	}
+}
+
 func TestClientEnqueueWithProcessInOption(t *testing.T) {
 	r := setup(t)
 	client := NewClient(getRedisConnOpt(t))
@@ -595,6 +724,21 @@ func TestClientEnqueueError(t *testing.T) {
 			task: NewTask(" ", h.JSON(map[string]interface{}{})),
 			opts: []Option{},
 		},
+		{
+			desc: "With empty task ID",
+			task: NewTask("foo", nil),
+			opts: []Option{TaskID("")},
+		},
+		{
+			desc: "With blank task ID",
+			task: NewTask("foo", nil),
+			opts: []Option{TaskID(" ")},
+		},
+		{
+			desc: "With unique option less than 1s",
+			task: NewTask("foo", nil),
+			opts: []Option{Unique(300 * time.Millisecond)},
+		},
 	}
 
 	for _, tc := range tests {
@@ -607,16 +751,17 @@ func TestClientEnqueueError(t *testing.T) {
 	}
 }
 
-func TestClientDefaultOptions(t *testing.T) {
+func TestClientWithDefaultOptions(t *testing.T) {
 	r := setup(t)
 
 	now := time.Now()
 
 	tests := []struct {
 		desc        string
-		defaultOpts []Option // options set at the client level.
+		defaultOpts []Option // options set at task initialization time
 		opts        []Option // options used at enqueue time.
-		task        *Task
+		tasktype    string
+		payload     []byte
 		wantInfo    *TaskInfo
 		queue       string // queue that the message should go into.
 		want        *base.TaskMessage
@@ -625,7 +770,8 @@ func TestClientDefaultOptions(t *testing.T) {
 			desc:        "With queue routing option",
 			defaultOpts: []Option{Queue("feed")},
 			opts:        []Option{},
-			task:        NewTask("feed:import", nil),
+			tasktype:    "feed:import",
+			payload:     nil,
 			wantInfo: &TaskInfo{
 				Queue: "feed",
 				Type:  "feed:import",
@@ -653,7 +799,8 @@ func TestClientDefaultOptions(t *testing.T) {
 			desc:        "With multiple options",
 			defaultOpts: []Option{Queue("feed"), MaxRetry(5)},
 			opts:        []Option{},
-			task:        NewTask("feed:import", nil),
+			tasktype:    "feed:import",
+			payload:     nil,
 			wantInfo: &TaskInfo{
 				Queue: "feed",
 				Type:  "feed:import",
@@ -681,7 +828,8 @@ func TestClientDefaultOptions(t *testing.T) {
 			desc:        "With overriding options at enqueue time",
 			defaultOpts: []Option{Queue("feed"), MaxRetry(5)},
 			opts:        []Option{Queue("critical")},
-			task:        NewTask("feed:import", nil),
+			tasktype:    "feed:import",
+			payload:     nil,
 			wantInfo: &TaskInfo{
 				Queue: "critical",
 				Type:  "feed:import",
@@ -710,8 +858,8 @@ func TestClientDefaultOptions(t *testing.T) {
 		h.FlushDB(t, r)
 		c := NewClient(getRedisConnOpt(t))
 		defer c.Close()
-		c.SetDefaultOptions(tc.task.Type(), tc.defaultOpts...)
-		gotInfo, err := c.Enqueue(tc.task, tc.opts...)
+		task := NewTask(tc.tasktype, tc.payload, tc.defaultOpts...)
+		gotInfo, err := c.Enqueue(task, tc.opts...)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -761,7 +909,7 @@ func TestClientEnqueueUnique(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
+	gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
 	if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
 		t.Errorf("TTL = %v, want %v", gotTTL, tc.ttl)
 		continue
@@ -806,7 +954,7 @@ func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
+	gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
 	wantTTL := time.Duration(tc.ttl.Seconds()+tc.d.Seconds()) * time.Second
 	if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
 		t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
@@ -852,7 +1000,7 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
+	gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
 	wantTTL := tc.at.Add(tc.ttl).Sub(time.Now())
 	if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
 		t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
```
## context.go (55 changed lines)

```diff
@@ -6,49 +6,16 @@ package asynq
 
 import (
 	"context"
-	"time"
 
-	"github.com/hibiken/asynq/internal/base"
+	asynqcontext "github.com/hibiken/asynq/internal/context"
 )
 
-// A taskMetadata holds task scoped data to put in context.
-type taskMetadata struct {
-	id         string
-	maxRetry   int
-	retryCount int
-	qname      string
-}
-
-// ctxKey type is unexported to prevent collisions with context keys defined in
-// other packages.
-type ctxKey int
-
-// metadataCtxKey is the context key for the task metadata.
-// Its value of zero is arbitrary.
-const metadataCtxKey ctxKey = 0
-
-// createContext returns a context and cancel function for a given task message.
-func createContext(msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
-	metadata := taskMetadata{
-		id:         msg.ID.String(),
-		maxRetry:   msg.Retry,
-		retryCount: msg.Retried,
-		qname:      msg.Queue,
-	}
-	ctx := context.WithValue(context.Background(), metadataCtxKey, metadata)
-	return context.WithDeadline(ctx, deadline)
-}
-
 // GetTaskID extracts a task ID from a context, if any.
 //
 // ID of a task is guaranteed to be unique.
 // ID of a task doesn't change if the task is being retried.
 func GetTaskID(ctx context.Context) (id string, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return "", false
-	}
-	return metadata.id, true
+	return asynqcontext.GetTaskID(ctx)
 }
 
 // GetRetryCount extracts retry count from a context, if any.
@@ -56,11 +23,7 @@ func GetTaskID(ctx context.Context) (id string, ok bool) {
 // Return value n indicates the number of times associated task has been
 // retried so far.
 func GetRetryCount(ctx context.Context) (n int, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return 0, false
-	}
-	return metadata.retryCount, true
+	return asynqcontext.GetRetryCount(ctx)
 }
 
 // GetMaxRetry extracts maximum retry from a context, if any.
@@ -68,20 +31,12 @@ func GetRetryCount(ctx context.Context) (n int, ok bool) {
 // Return value n indicates the maximum number of times the assoicated task
 // can be retried if ProcessTask returns a non-nil error.
 func GetMaxRetry(ctx context.Context) (n int, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return 0, false
-	}
-	return metadata.maxRetry, true
+	return asynqcontext.GetMaxRetry(ctx)
 }
 
 // GetQueueName extracts queue name from a context, if any.
 //
 // Return value qname indicates which queue the task was pulled from.
 func GetQueueName(ctx context.Context) (qname string, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return "", false
-	}
-	return metadata.qname, true
+	return asynqcontext.GetQueueName(ctx)
 }
```
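The four accessors keep their exported signatures; only the implementation moved to the internal/context package. As a usage sketch (the handler itself is hypothetical, the accessor calls are the API shown above):

```go
package tasks

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

// loggingHandler reads the task metadata that asynq stores in the
// handler's context before doing any work.
func loggingHandler(ctx context.Context, t *asynq.Task) error {
	id, _ := asynq.GetTaskID(ctx)
	retried, _ := asynq.GetRetryCount(ctx)
	maxRetry, _ := asynq.GetMaxRetry(ctx)
	qname, _ := asynq.GetQueueName(ctx)
	log.Printf("processing task id=%s queue=%s attempt=%d/%d", id, qname, retried, maxRetry)
	// ... actual task processing would go here ...
	return nil
}
```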
## go.mod (12 changed lines)

```diff
@@ -3,15 +3,17 @@ module github.com/hibiken/asynq
 go 1.13
 
 require (
-	github.com/go-redis/redis/v7 v7.4.0
-	github.com/golang/protobuf v1.4.1
-	github.com/google/go-cmp v0.5.0
+	github.com/go-redis/redis/v8 v8.11.2
+	github.com/golang/protobuf v1.4.2
+	github.com/google/go-cmp v0.5.6
 	github.com/google/uuid v1.2.0
 	github.com/kr/pretty v0.1.0 // indirect
 	github.com/robfig/cron/v3 v3.0.1
 	github.com/spf13/cast v1.3.1
 	github.com/stretchr/testify v1.6.1 // indirect
 	go.uber.org/goleak v0.10.0
-	golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e
+	golang.org/x/sys v0.0.0-20210112080510-489259a85091
 	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
 	google.golang.org/protobuf v1.25.0
 	gopkg.in/yaml.v2 v2.2.7 // indirect
+	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
 )
```
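The go-redis bump from v7 to v8 above is the API-breaking piece of this dependency set: in v8 every command takes a context.Context as its first argument, which is why the test changes in client_test.go pass context.Background() to r.TTL. A minimal sketch of the new calling convention (the key name is made up):

```go
package main

import (
	"context"
	"log"

	"github.com/go-redis/redis/v8"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	defer rdb.Close()

	// v7: rdb.TTL("some:key").Val()
	// v8: the command takes a context as its first argument.
	ttl := rdb.TTL(context.Background(), "some:key").Val()
	log.Printf("ttl=%v", ttl)
}
```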
## go.sum (111 changed lines)

The checksum churn corresponding to the go.mod changes; the mirror did not preserve which entries are additions versus removals, so the hunk contents are listed as captured:

```text
@@ -1,91 +1,129 @@
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-redis/redis/v8 v8.11.2 h1:WqlSpAwz8mxDSMCvbyz1Mkiqe0LE5OY4j3lgkvu1Ts0=
github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -93,15 +131,24 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -109,6 +156,7 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
@@ -119,10 +167,11 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
```
heartbeat.go

@@ -125,10 +125,10 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
timer.Reset(h.interval)

case w := <-h.starting:
h.workers[w.msg.ID.String()] = w
h.workers[w.msg.ID] = w

case msg := <-h.finished:
delete(h.workers, msg.ID.String())
delete(h.workers, msg.ID)
}
}
}()
inspector.go (153 changed lines)
@@ -10,8 +10,7 @@ import (
"strings"
"time"

"github.com/go-redis/redis/v7"
"github.com/google/uuid"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/rdb"
@@ -53,6 +52,9 @@ type QueueInfo struct {
// It is an approximate memory usage value in bytes since the value is computed by sampling.
MemoryUsage int64

// Latency of the queue, measured by the oldest pending task in the queue.
Latency time.Duration

// Size is the total number of tasks in the queue.
// The value is the sum of Pending, Active, Scheduled, Retry, and Archived.
Size int
@@ -67,6 +69,8 @@ type QueueInfo struct {
Retry int
// Number of archived tasks.
Archived int
// Number of stored completed tasks.
Completed int

// Total number of tasks being processed during the given date.
// The number includes both succeeded and failed tasks.
@@ -94,12 +98,14 @@ func (i *Inspector) GetQueueInfo(qname string) (*QueueInfo, error) {
return &QueueInfo{
Queue: stats.Queue,
MemoryUsage: stats.MemoryUsage,
Latency: stats.Latency,
Size: stats.Size,
Pending: stats.Pending,
Active: stats.Active,
Scheduled: stats.Scheduled,
Retry: stats.Retry,
Archived: stats.Archived,
Completed: stats.Completed,
Processed: stats.Processed,
Failed: stats.Failed,
Paused: stats.Paused,
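The new Latency and Completed fields surface directly on QueueInfo; a minimal sketch of reading them (the Redis address and queue name are assumptions, not part of the diff):

package main

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
	info, err := insp.GetQueueInfo("default")
	if err != nil {
		log.Fatal(err)
	}
	// Latency is measured from the oldest pending task; Completed counts
	// tasks retained after successful processing.
	fmt.Printf("queue=%s latency=%v completed=%d\n", info.Queue, info.Latency, info.Completed)
}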
@@ -175,14 +181,10 @@ func (i *Inspector) DeleteQueue(qname string, force bool) error {

// GetTaskInfo retrieves task information given a task id and queue name.
//
// Returns ErrQueueNotFound if a queue with the given name doesn't exist.
// Returns ErrTaskNotFound if a task with the given id doesn't exist in the queue.
// Returns an error wrapping ErrQueueNotFound if a queue with the given name doesn't exist.
// Returns an error wrapping ErrTaskNotFound if a task with the given id doesn't exist in the queue.
func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) {
taskid, err := uuid.Parse(id)
if err != nil {
return nil, fmt.Errorf("asynq: %s is not a valid task id", id)
}
info, err := i.rdb.GetTaskInfo(qname, taskid)
info, err := i.rdb.GetTaskInfo(qname, id)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -191,7 +193,7 @@ func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) {
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
}
return newTaskInfo(info.Message, info.State, info.NextProcessAt), nil
return newTaskInfo(info.Message, info.State, info.NextProcessAt, info.Result), nil
}

// ListOption specifies behavior of list operation.
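With the sentinels now wrapped, callers should test with errors.Is rather than direct comparison. A short sketch (the errors and fmt imports are assumed):

func inspectTask(insp *asynq.Inspector, id string) {
	info, err := insp.GetTaskInfo("default", id)
	switch {
	case errors.Is(err, asynq.ErrQueueNotFound):
		fmt.Println("queue does not exist")
	case errors.Is(err, asynq.ErrTaskNotFound):
		fmt.Println("no task with that id in the queue")
	case err != nil:
		fmt.Println("unexpected error:", err)
	default:
		fmt.Println(info.State, info.NextProcessAt)
	}
}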
@@ -264,17 +266,21 @@ func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*TaskI
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
msgs, err := i.rdb.ListPending(qname, pgn)
infos, err := i.rdb.ListPending(qname, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
}
now := time.Now()
var tasks []*TaskInfo
for _, m := range msgs {
tasks = append(tasks, newTaskInfo(m, base.TaskStatePending, now))
for _, i := range infos {
tasks = append(tasks, newTaskInfo(
i.Message,
i.State,
i.NextProcessAt,
i.Result,
))
}
return tasks, err
}
@@ -288,7 +294,7 @@ func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskIn
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
msgs, err := i.rdb.ListActive(qname, pgn)
infos, err := i.rdb.ListActive(qname, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -296,8 +302,13 @@ func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskIn
return nil, fmt.Errorf("asynq: %v", err)
}
var tasks []*TaskInfo
for _, m := range msgs {
tasks = append(tasks, newTaskInfo(m, base.TaskStateActive, time.Time{}))
for _, i := range infos {
tasks = append(tasks, newTaskInfo(
i.Message,
i.State,
i.NextProcessAt,
i.Result,
))
}
return tasks, err
}
@@ -312,7 +323,7 @@ func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*Tas
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListScheduled(qname, pgn)
infos, err := i.rdb.ListScheduled(qname, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -320,11 +331,12 @@ func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*Tas
return nil, fmt.Errorf("asynq: %v", err)
}
var tasks []*TaskInfo
for _, z := range zs {
for _, i := range infos {
tasks = append(tasks, newTaskInfo(
z.Message,
base.TaskStateScheduled,
time.Unix(z.Score, 0),
i.Message,
i.State,
i.NextProcessAt,
i.Result,
))
}
return tasks, nil
@@ -340,7 +352,7 @@ func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInf
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListRetry(qname, pgn)
infos, err := i.rdb.ListRetry(qname, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -348,11 +360,12 @@ func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInf
return nil, fmt.Errorf("asynq: %v", err)
}
var tasks []*TaskInfo
for _, z := range zs {
for _, i := range infos {
tasks = append(tasks, newTaskInfo(
z.Message,
base.TaskStateRetry,
time.Unix(z.Score, 0),
i.Message,
i.State,
i.NextProcessAt,
i.Result,
))
}
return tasks, nil
@@ -368,7 +381,7 @@ func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*Task
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListArchived(qname, pgn)
infos, err := i.rdb.ListArchived(qname, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -376,11 +389,41 @@ func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*Task
return nil, fmt.Errorf("asynq: %v", err)
}
var tasks []*TaskInfo
for _, z := range zs {
for _, i := range infos {
tasks = append(tasks, newTaskInfo(
z.Message,
base.TaskStateArchived,
time.Time{},
i.Message,
i.State,
i.NextProcessAt,
i.Result,
))
}
return tasks, nil
}

// ListCompletedTasks retrieves completed tasks from the specified queue.
// Tasks are sorted by expiration time (i.e. CompletedAt + Retention) in descending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListCompletedTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil {
return nil, fmt.Errorf("asynq: %v", err)
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
infos, err := i.rdb.ListCompleted(qname, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
}
var tasks []*TaskInfo
for _, i := range infos {
tasks = append(tasks, newTaskInfo(
i.Message,
i.State,
i.NextProcessAt,
i.Result,
))
}
return tasks, nil
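A sketch of reading retained results back out through the new method (the page size is an arbitrary choice; Result holds whatever the handler wrote via its ResultWriter, if anything):

func printCompleted(insp *asynq.Inspector) error {
	tasks, err := insp.ListCompletedTasks("default", asynq.PageSize(10))
	if err != nil {
		return err
	}
	for _, t := range tasks {
		fmt.Printf("id=%s result=%q\n", t.ID, t.Result)
	}
	return nil
}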
@@ -426,22 +469,28 @@ func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
return int(n), err
}

// DeleteAllCompletedTasks deletes all completed tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllCompletedTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.DeleteAllCompletedTasks(qname)
return int(n), err
}

// DeleteTask deletes a task with the given id from the given queue.
// The task needs to be in pending, scheduled, retry, or archived state,
// otherwise DeleteTask will return an error.
//
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is in active state, it returns a non-nil error.
func (i *Inspector) DeleteTask(qname, id string) error {
if err := base.ValidateQueueName(qname); err != nil {
return fmt.Errorf("asynq: %v", err)
}
taskid, err := uuid.Parse(id)
if err != nil {
return fmt.Errorf("asynq: %s is not a valid task id", id)
}
err = i.rdb.DeleteTask(qname, taskid)
err := i.rdb.DeleteTask(qname, id)
switch {
case errors.IsQueueNotFound(err):
return fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -488,18 +537,14 @@ func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
// The task needs to be in scheduled, retry, or archived state, otherwise RunTask
// will return an error.
//
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is in pending or active state, it returns a non-nil error.
func (i *Inspector) RunTask(qname, id string) error {
if err := base.ValidateQueueName(qname); err != nil {
return fmt.Errorf("asynq: %v", err)
}
taskid, err := uuid.Parse(id)
if err != nil {
return fmt.Errorf("asynq: %s is not a valid task id", id)
}
err = i.rdb.RunTask(qname, taskid)
err := i.rdb.RunTask(qname, id)
switch {
case errors.IsQueueNotFound(err):
return fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -545,18 +590,14 @@ func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
// The task needs to be in pending, scheduled, or retry state, otherwise ArchiveTask
// will return an error.
//
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is already archived, it returns a non-nil error.
func (i *Inspector) ArchiveTask(qname, id string) error {
if err := base.ValidateQueueName(qname); err != nil {
return fmt.Errorf("asynq: %v", err)
}
taskid, err := uuid.Parse(id)
if err != nil {
return fmt.Errorf("asynq: %s is not a valid task id", id)
}
err = i.rdb.ArchiveTask(qname, taskid)
err := i.rdb.ArchiveTask(qname, id)
switch {
case errors.IsQueueNotFound(err):
return fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -807,6 +848,12 @@ func parseOption(s string) (Option, error) {
return nil, err
}
return ProcessIn(d), nil
case "Retention":
d, err := time.ParseDuration(arg)
if err != nil {
return nil, err
}
return Retention(d), nil
default:
return nil, fmt.Errorf("cannot parse option string %q", s)
}
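The Retention string form parsed above is the same option users attach at enqueue time; a sketch (task type, payload, and the custom ID are assumptions):

task := asynq.NewTask("email:welcome", payload, asynq.Retention(24*time.Hour))
info, err := client.Enqueue(task, asynq.TaskID("welcome-42"))
// info.Retention reports 24h; enqueueing again with the same TaskID fails
// with an error wrapping asynq.ErrTaskIDConflict.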
inspector_test.go

@@ -5,6 +5,7 @@
package asynq

import (
"context"
"errors"
"fmt"
"math"
@@ -18,6 +19,7 @@ import (
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
"github.com/hibiken/asynq/internal/timeutil"
)

func TestInspectorQueues(t *testing.T) {
@@ -37,7 +39,7 @@ func TestInspectorQueues(t *testing.T) {
for _, tc := range tests {
h.FlushDB(t, r)
for _, qname := range tc.queues {
if err := r.SAdd(base.AllQueues, qname).Err(); err != nil {
if err := r.SAdd(context.Background(), base.AllQueues, qname).Err(); err != nil {
t.Fatalf("could not initialize all queue set: %v", err)
}
}
@@ -136,7 +138,7 @@ func TestInspectorDeleteQueue(t *testing.T) {
tc.qname, tc.force, err)
continue
}
if r.SIsMember(base.AllQueues, tc.qname).Val() {
if r.SIsMember(context.Background(), base.AllQueues, tc.qname).Val() {
t.Errorf("%q is a member of %q", tc.qname, base.AllQueues)
}
}
@@ -268,17 +270,20 @@ func TestInspectorGetQueueInfo(t *testing.T) {
ignoreMemUsg := cmpopts.IgnoreFields(QueueInfo{}, "MemoryUsage")

inspector := NewInspector(getRedisConnOpt(t))
inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))

tests := []struct {
pending map[string][]*base.TaskMessage
active map[string][]*base.TaskMessage
scheduled map[string][]base.Z
retry map[string][]base.Z
archived map[string][]base.Z
processed map[string]int
failed map[string]int
qname string
want *QueueInfo
pending map[string][]*base.TaskMessage
active map[string][]*base.TaskMessage
scheduled map[string][]base.Z
retry map[string][]base.Z
archived map[string][]base.Z
completed map[string][]base.Z
processed map[string]int
failed map[string]int
oldestPendingMessageEnqueueTime map[string]time.Time
qname string
want *QueueInfo
}{
{
pending: map[string][]*base.TaskMessage{
@@ -309,6 +314,11 @@ func TestInspectorGetQueueInfo(t *testing.T) {
"critical": {},
"low": {},
},
completed: map[string][]base.Z{
"default": {},
"critical": {},
"low": {},
},
processed: map[string]int{
"default": 120,
"critical": 100,
@@ -319,15 +329,22 @@ func TestInspectorGetQueueInfo(t *testing.T) {
"critical": 0,
"low": 5,
},
oldestPendingMessageEnqueueTime: map[string]time.Time{
"default": now.Add(-15 * time.Second),
"critical": now.Add(-200 * time.Millisecond),
"low": now.Add(-30 * time.Second),
},
qname: "default",
want: &QueueInfo{
Queue: "default",
Latency: 15 * time.Second,
Size: 4,
Pending: 1,
Active: 1,
Scheduled: 2,
Retry: 0,
Archived: 0,
Completed: 0,
Processed: 120,
Failed: 2,
Paused: false,
@@ -343,13 +360,22 @@ func TestInspectorGetQueueInfo(t *testing.T) {
h.SeedAllScheduledQueues(t, r, tc.scheduled)
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived)
h.SeedAllCompletedQueues(t, r, tc.completed)
ctx := context.Background()
for qname, n := range tc.processed {
processedKey := base.ProcessedKey(qname, now)
r.Set(processedKey, n, 0)
r.Set(ctx, processedKey, n, 0)
}
for qname, n := range tc.failed {
failedKey := base.FailedKey(qname, now)
r.Set(failedKey, n, 0)
r.Set(ctx, failedKey, n, 0)
}
for qname, enqueueTime := range tc.oldestPendingMessageEnqueueTime {
if enqueueTime.IsZero() {
continue
}
oldestPendingMessageID := r.LRange(ctx, base.PendingKey(qname), -1, -1).Val()[0] // get the right most msg in the list
r.HSet(ctx, base.TaskKey(qname, oldestPendingMessageID), "pending_since", enqueueTime.UnixNano())
}

got, err := inspector.GetQueueInfo(tc.qname)
@@ -385,14 +411,14 @@ func TestInspectorHistory(t *testing.T) {
for _, tc := range tests {
h.FlushDB(t, r)

r.SAdd(base.AllQueues, tc.qname)
r.SAdd(context.Background(), base.AllQueues, tc.qname)
// populate last n days data
for i := 0; i < tc.n; i++ {
ts := now.Add(-time.Duration(i) * 24 * time.Hour)
processedKey := base.ProcessedKey(tc.qname, ts)
failedKey := base.FailedKey(tc.qname, ts)
r.Set(processedKey, (i+1)*1000, 0)
r.Set(failedKey, (i+1)*10, 0)
r.Set(context.Background(), processedKey, (i+1)*1000, 0)
r.Set(context.Background(), failedKey, (i+1)*10, 0)
}

got, err := inspector.History(tc.qname, tc.n)
@@ -423,7 +449,7 @@ func TestInspectorHistory(t *testing.T) {
}

func createPendingTask(msg *base.TaskMessage) *TaskInfo {
return newTaskInfo(msg, base.TaskStatePending, time.Now())
return newTaskInfo(msg, base.TaskStatePending, time.Now(), nil)
}

func TestInspectorGetTaskInfo(t *testing.T) {
@@ -483,47 +509,52 @@ func TestInspectorGetTaskInfo(t *testing.T) {
}{
{
qname: "default",
id: m1.ID.String(),
id: m1.ID,
want: newTaskInfo(
m1,
base.TaskStateActive,
time.Time{}, // zero value for n/a
nil,
),
},
{
qname: "default",
id: m2.ID.String(),
id: m2.ID,
want: newTaskInfo(
m2,
base.TaskStateScheduled,
fiveMinsFromNow,
nil,
),
},
{
qname: "custom",
id: m3.ID.String(),
id: m3.ID,
want: newTaskInfo(
m3,
base.TaskStateRetry,
oneHourFromNow,
nil,
),
},
{
qname: "custom",
id: m4.ID.String(),
id: m4.ID,
want: newTaskInfo(
m4,
base.TaskStateArchived,
time.Time{}, // zero value for n/a
nil,
),
},
{
qname: "custom",
id: m5.ID.String(),
id: m5.ID,
want: newTaskInfo(
m5,
base.TaskStatePending,
now,
nil,
),
},
}
@@ -602,7 +633,7 @@ func TestInspectorGetTaskInfoError(t *testing.T) {
}{
{
qname: "nonexistent",
id: m1.ID.String(),
id: m1.ID,
wantErr: ErrQueueNotFound,
},
{
@@ -721,8 +752,8 @@ func TestInspectorListActiveTasks(t *testing.T) {
},
qname: "default",
want: []*TaskInfo{
newTaskInfo(m1, base.TaskStateActive, time.Time{}),
newTaskInfo(m2, base.TaskStateActive, time.Time{}),
newTaskInfo(m1, base.TaskStateActive, time.Time{}, nil),
newTaskInfo(m2, base.TaskStateActive, time.Time{}, nil),
},
},
}
@@ -748,6 +779,7 @@ func createScheduledTask(z base.Z) *TaskInfo {
z.Message,
base.TaskStateScheduled,
time.Unix(z.Score, 0),
nil,
)
}

@@ -817,6 +849,7 @@ func createRetryTask(z base.Z) *TaskInfo {
z.Message,
base.TaskStateRetry,
time.Unix(z.Score, 0),
nil,
)
}

@@ -887,6 +920,7 @@ func createArchivedTask(z base.Z) *TaskInfo {
z.Message,
base.TaskStateArchived,
time.Time{}, // zero value for n/a
nil,
)
}

@@ -951,6 +985,83 @@ func TestInspectorListArchivedTasks(t *testing.T) {
}
}

func newCompletedTaskMessage(typename, qname string, retention time.Duration, completedAt time.Time) *base.TaskMessage {
msg := h.NewTaskMessageWithQueue(typename, nil, qname)
msg.Retention = int64(retention.Seconds())
msg.CompletedAt = completedAt.Unix()
return msg
}

func createCompletedTask(z base.Z) *TaskInfo {
return newTaskInfo(
z.Message,
base.TaskStateCompleted,
time.Time{}, // zero value for n/a
nil, // TODO: Test with result data
)
}

func TestInspectorListCompletedTasks(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
m1 := newCompletedTaskMessage("task1", "default", 1*time.Hour, now.Add(-3*time.Minute)) // Expires in 57 mins
m2 := newCompletedTaskMessage("task2", "default", 30*time.Minute, now.Add(-10*time.Minute)) // Expires in 20 mins
m3 := newCompletedTaskMessage("task3", "default", 2*time.Hour, now.Add(-30*time.Minute)) // Expires in 90 mins
m4 := newCompletedTaskMessage("task4", "custom", 15*time.Minute, now.Add(-2*time.Minute)) // Expires in 13 mins
z1 := base.Z{Message: m1, Score: m1.CompletedAt + m1.Retention}
z2 := base.Z{Message: m2, Score: m2.CompletedAt + m2.Retention}
z3 := base.Z{Message: m3, Score: m3.CompletedAt + m3.Retention}
z4 := base.Z{Message: m4, Score: m4.CompletedAt + m4.Retention}

inspector := NewInspector(getRedisConnOpt(t))

tests := []struct {
desc string
completed map[string][]base.Z
qname string
want []*TaskInfo
}{
{
desc: "with a few completed tasks",
completed: map[string][]base.Z{
"default": {z1, z2, z3},
"custom": {z4},
},
qname: "default",
// Should be sorted by expiration time (CompletedAt + Retention).
want: []*TaskInfo{
createCompletedTask(z2),
createCompletedTask(z1),
createCompletedTask(z3),
},
},
{
desc: "with empty completed queue",
completed: map[string][]base.Z{
"default": {},
},
qname: "default",
want: []*TaskInfo(nil),
},
}

for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)

got, err := inspector.ListCompletedTasks(tc.qname)
if err != nil {
t.Errorf("%s; ListCompletedTasks(%q) returned error: %v", tc.desc, tc.qname, err)
continue
}
if diff := cmp.Diff(tc.want, got, cmp.AllowUnexported(TaskInfo{})); diff != "" {
t.Errorf("%s; ListCompletedTasks(%q) = %v, want %v; (-want,+got)\n%s",
tc.desc, tc.qname, got, tc.want, diff)
}
}
}

func TestInspectorListPagination(t *testing.T) {
// Create 100 tasks.
var msgs []*base.TaskMessage
@@ -1049,6 +1160,9 @@ func TestInspectorListTasksQueueNotFoundError(t *testing.T) {
if _, err := inspector.ListArchivedTasks(tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListArchivedTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
}
if _, err := inspector.ListCompletedTasks(tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListCompletedTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
}
}
}

@@ -1314,6 +1428,72 @@ func TestInspectorDeleteAllArchivedTasks(t *testing.T) {
}
}

func TestInspectorDeleteAllCompletedTasks(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
m1 := newCompletedTaskMessage("task1", "default", 30*time.Minute, now.Add(-2*time.Minute))
m2 := newCompletedTaskMessage("task2", "default", 30*time.Minute, now.Add(-5*time.Minute))
m3 := newCompletedTaskMessage("task3", "default", 30*time.Minute, now.Add(-10*time.Minute))
m4 := newCompletedTaskMessage("task4", "custom", 30*time.Minute, now.Add(-3*time.Minute))
z1 := base.Z{Message: m1, Score: m1.CompletedAt + m1.Retention}
z2 := base.Z{Message: m2, Score: m2.CompletedAt + m2.Retention}
z3 := base.Z{Message: m3, Score: m3.CompletedAt + m3.Retention}
z4 := base.Z{Message: m4, Score: m4.CompletedAt + m4.Retention}

inspector := NewInspector(getRedisConnOpt(t))

tests := []struct {
completed map[string][]base.Z
qname string
want int
wantCompleted map[string][]base.Z
}{
{
completed: map[string][]base.Z{
"default": {z1, z2, z3},
"custom": {z4},
},
qname: "default",
want: 3,
wantCompleted: map[string][]base.Z{
"default": {},
"custom": {z4},
},
},
{
completed: map[string][]base.Z{
"default": {},
},
qname: "default",
want: 0,
wantCompleted: map[string][]base.Z{
"default": {},
},
},
}

for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)

got, err := inspector.DeleteAllCompletedTasks(tc.qname)
if err != nil {
t.Errorf("DeleteAllCompletedTasks(%q) returned error: %v", tc.qname, err)
continue
}
if got != tc.want {
t.Errorf("DeleteAllCompletedTasks(%q) = %d, want %d", tc.qname, got, tc.want)
}
for qname, want := range tc.wantCompleted {
gotCompleted := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, gotCompleted, h.SortZSetEntryOpt); diff != "" {
t.Errorf("unexpected completed tasks in queue %q: (-want, +got)\n%s", qname, diff)
}
}
}
}

func TestInspectorArchiveAllPendingTasks(t *testing.T) {
r := setup(t)
defer r.Close()
@@ -3033,6 +3213,7 @@ func TestParseOption(t *testing.T) {
{`Unique(1h)`, UniqueOpt, 1 * time.Hour},
{ProcessAt(oneHourFromNow).String(), ProcessAtOpt, oneHourFromNow},
{`ProcessIn(10m)`, ProcessInOpt, 10 * time.Minute},
{`Retention(24h)`, RetentionOpt, 24 * time.Hour},
}

for _, tc := range tests {
@@ -3064,7 +3245,7 @@ func TestParseOption(t *testing.T) {
if gotVal != tc.wantVal.(int) {
t.Fatalf("got value %v, want %v", gotVal, tc.wantVal)
}
case TimeoutOpt, UniqueOpt, ProcessInOpt:
case TimeoutOpt, UniqueOpt, ProcessInOpt, RetentionOpt:
gotVal, ok := got.Value().(time.Duration)
if !ok {
t.Fatal("returned Option with non duration value")
internal/asynqtest/asynqtest.go

@@ -6,13 +6,14 @@
package asynqtest

import (
"context"
"encoding/json"
"math"
"sort"
"testing"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/uuid"
@@ -31,7 +32,7 @@ func EquateInt64Approx(margin int64) cmp.Option {
var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage) []*base.TaskMessage {
out := append([]*base.TaskMessage(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool {
return out[i].ID.String() < out[j].ID.String()
return out[i].ID < out[j].ID
})
return out
})
@@ -40,7 +41,7 @@ var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage
var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []base.Z) []base.Z {
out := append([]base.Z(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool {
return out[i].Message.ID.String() < out[j].Message.ID.String()
return out[i].Message.ID < out[j].Message.ID
})
return out
})
@@ -103,7 +104,7 @@ func NewTaskMessage(taskType string, payload []byte) *base.TaskMessage {
// task type, payload and queue name.
func NewTaskMessageWithQueue(taskType string, payload []byte, qname string) *base.TaskMessage {
return &base.TaskMessage{
ID: uuid.New(),
ID: uuid.NewString(),
Type: taskType,
Queue: qname,
Retry: 25,
@@ -138,6 +139,12 @@ func TaskMessageWithError(t base.TaskMessage, errMsg string, failedAt time.Time)
return &t
}

// TaskMessageWithCompletedAt returns an updated copy of t after completion.
func TaskMessageWithCompletedAt(t base.TaskMessage, completedAt time.Time) *base.TaskMessage {
t.CompletedAt = completedAt.Unix()
return &t
}

// MustMarshal marshals given task message and returns a json string.
// Calling test will fail if marshaling errors out.
func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
@@ -165,12 +172,12 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
tb.Helper()
switch r := r.(type) {
case *redis.Client:
if err := r.FlushDB().Err(); err != nil {
if err := r.FlushDB(context.Background()).Err(); err != nil {
tb.Fatal(err)
}
case *redis.ClusterClient:
err := r.ForEachMaster(func(c *redis.Client) error {
if err := c.FlushAll().Err(); err != nil {
err := r.ForEachMaster(context.Background(), func(ctx context.Context, c *redis.Client) error {
if err := c.FlushAll(ctx).Err(); err != nil {
return err
}
return nil
@@ -184,45 +191,52 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
// SeedPendingQueue initializes the specified queue with the given messages.
func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisList(tb, r, base.PendingKey(qname), msgs, base.TaskStatePending)
}

// SeedActiveQueue initializes the active queue with the given messages.
func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisList(tb, r, base.ActiveKey(qname), msgs, base.TaskStateActive)
}

// SeedScheduledQueue initializes the scheduled queue with the given messages.
func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.ScheduledKey(qname), entries, base.TaskStateScheduled)
}

// SeedRetryQueue initializes the retry queue with the given messages.
func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.RetryKey(qname), entries, base.TaskStateRetry)
}

// SeedArchivedQueue initializes the archived queue with the given messages.
func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.ArchivedKey(qname), entries, base.TaskStateArchived)
}

// SeedDeadlines initializes the deadlines set with the given entries.
func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries, base.TaskStateActive)
}

// SeedCompletedQueue initializes the completed set with the given entries.
func SeedCompletedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.CompletedKey(qname), entries, base.TaskStateCompleted)
}

// SeedAllPendingQueues initializes all of the specified queues with the given messages.
//
// pending maps a queue name to a list of messages.
@@ -273,15 +287,23 @@ func SeedAllDeadlines(tb testing.TB, r redis.UniversalClient, deadlines map[stri
}
}

// SeedAllCompletedQueues initializes all of the completed queues with the given entries.
func SeedAllCompletedQueues(tb testing.TB, r redis.UniversalClient, completed map[string][]base.Z) {
tb.Helper()
for q, entries := range completed {
SeedCompletedQueue(tb, r, entries, q)
}
}

func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
msgs []*base.TaskMessage, state base.TaskState) {
tb.Helper()
for _, msg := range msgs {
encoded := MustMarshal(tb, msg)
if err := c.LPush(key, msg.ID.String()).Err(); err != nil {
if err := c.LPush(context.Background(), key, msg.ID).Err(); err != nil {
tb.Fatal(err)
}
key := base.TaskKey(msg.Queue, msg.ID.String())
key := base.TaskKey(msg.Queue, msg.ID)
data := map[string]interface{}{
"msg": encoded,
"state": state.String(),
@@ -289,11 +311,11 @@ func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
"deadline": msg.Deadline,
"unique_key": msg.UniqueKey,
}
if err := c.HSet(key, data).Err(); err != nil {
if err := c.HSet(context.Background(), key, data).Err(); err != nil {
tb.Fatal(err)
}
if len(msg.UniqueKey) > 0 {
err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID, 1*time.Minute).Err()
if err != nil {
tb.Fatalf("Failed to set unique lock in redis: %v", err)
}
@@ -307,11 +329,11 @@ func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
for _, item := range items {
msg := item.Message
encoded := MustMarshal(tb, msg)
z := &redis.Z{Member: msg.ID.String(), Score: float64(item.Score)}
if err := c.ZAdd(key, z).Err(); err != nil {
z := &redis.Z{Member: msg.ID, Score: float64(item.Score)}
if err := c.ZAdd(context.Background(), key, z).Err(); err != nil {
tb.Fatal(err)
}
key := base.TaskKey(msg.Queue, msg.ID.String())
key := base.TaskKey(msg.Queue, msg.ID)
data := map[string]interface{}{
"msg": encoded,
"state": state.String(),
@@ -319,11 +341,11 @@ func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
"deadline": msg.Deadline,
"unique_key": msg.UniqueKey,
}
if err := c.HSet(key, data).Err(); err != nil {
if err := c.HSet(context.Background(), key, data).Err(); err != nil {
tb.Fatal(err)
}
if len(msg.UniqueKey) > 0 {
err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID, 1*time.Minute).Err()
if err != nil {
tb.Fatalf("Failed to set unique lock in redis: %v", err)
}
@@ -366,6 +388,13 @@ func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) [
return getMessagesFromZSet(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
}

// GetCompletedMessages returns all completed task messages in the given queue.
// It also asserts the state field of the task.
func GetCompletedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
tb.Helper()
return getMessagesFromZSet(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
}

// GetScheduledEntries returns all scheduled messages and their scores in the given queue.
// It also asserts the state field of the task.
func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
@@ -394,17 +423,24 @@ func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) [
return getMessagesFromZSetWithScores(tb, r, qname, base.DeadlinesKey, base.TaskStateActive)
}

// GetCompletedEntries returns all completed messages and their scores in the given queue.
// It also asserts the state field of the task.
func GetCompletedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
tb.Helper()
return getMessagesFromZSetWithScores(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
}

// Retrieves all messages stored under `keyFn(qname)` key in redis list.
func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
tb.Helper()
ids := r.LRange(keyFn(qname), 0, -1).Val()
ids := r.LRange(context.Background(), keyFn(qname), 0, -1).Val()
var msgs []*base.TaskMessage
for _, id := range ids {
taskKey := base.TaskKey(qname, id)
data := r.HGet(taskKey, "msg").Val()
data := r.HGet(context.Background(), taskKey, "msg").Val()
msgs = append(msgs, MustUnmarshal(tb, data))
if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
}
}
@@ -415,13 +451,13 @@ func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
tb.Helper()
ids := r.ZRange(keyFn(qname), 0, -1).Val()
ids := r.ZRange(context.Background(), keyFn(qname), 0, -1).Val()
var msgs []*base.TaskMessage
for _, id := range ids {
taskKey := base.TaskKey(qname, id)
msg := r.HGet(taskKey, "msg").Val()
msg := r.HGet(context.Background(), taskKey, "msg").Val()
msgs = append(msgs, MustUnmarshal(tb, msg))
if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
}
}
@@ -432,14 +468,14 @@ func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
func getMessagesFromZSetWithScores(tb testing.TB, r redis.UniversalClient,
qname string, keyFn func(qname string) string, state base.TaskState) []base.Z {
tb.Helper()
zs := r.ZRangeWithScores(keyFn(qname), 0, -1).Val()
zs := r.ZRangeWithScores(context.Background(), keyFn(qname), 0, -1).Val()
var res []base.Z
for _, z := range zs {
taskID := z.Member.(string)
taskKey := base.TaskKey(qname, taskID)
msg := r.HGet(taskKey, "msg").Val()
msg := r.HGet(context.Background(), taskKey, "msg").Val()
res = append(res, base.Z{Message: MustUnmarshal(tb, msg), Score: int64(z.Score)})
if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
tb.Errorf("task (id=%q) is in %q state, want %v", taskID, gotState, state)
}
}
internal/base/base.go

@@ -14,16 +14,15 @@ import (
"sync"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/golang/protobuf/ptypes"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/errors"
pb "github.com/hibiken/asynq/internal/proto"
"google.golang.org/protobuf/proto"
)

// Version of asynq library and CLI.
const Version = "0.18.3"
const Version = "0.19.1"

// DefaultQueueName is the queue name used if none are specified by user.
const DefaultQueueName = "default"
@@ -49,6 +48,7 @@ const (
TaskStateScheduled
TaskStateRetry
TaskStateArchived
TaskStateCompleted
)

func (s TaskState) String() string {
@@ -63,6 +63,8 @@ func (s TaskState) String() string {
return "retry"
case TaskStateArchived:
return "archived"
case TaskStateCompleted:
return "completed"
}
panic(fmt.Sprintf("internal error: unknown task state %d", s))
}
@@ -79,6 +81,8 @@ func TaskStateFromString(s string) (TaskState, error) {
return TaskStateRetry, nil
case "archived":
return TaskStateArchived, nil
case "completed":
return TaskStateCompleted, nil
}
return 0, errors.E(errors.FailedPrecondition, fmt.Sprintf("%q is not supported task state", s))
}
@@ -137,6 +141,10 @@ func DeadlinesKey(qname string) string {
return fmt.Sprintf("%sdeadlines", QueueKeyPrefix(qname))
}

// CompletedKey returns a redis key for the completed tasks in the given queue.
func CompletedKey(qname string) string {
return fmt.Sprintf("%scompleted", QueueKeyPrefix(qname))
}

// PausedKey returns a redis key to indicate that the given queue is paused.
func PausedKey(qname string) string {
return fmt.Sprintf("%spaused", QueueKeyPrefix(qname))
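For instance (matching TestCompletedKey further down):

fmt.Println(base.CompletedKey("default")) // prints "asynq:{default}:completed"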
@@ -191,7 +199,7 @@ type TaskMessage struct {
Payload []byte

// ID is a unique identifier for each task.
ID uuid.UUID
ID string

// Queue is a name this message should be enqueued to.
Queue string
@@ -230,6 +238,15 @@ type TaskMessage struct {
//
// Empty string indicates that no uniqueness lock was used.
UniqueKey string

// Retention specifies the number of seconds the task should be retained after completion.
Retention int64

// CompletedAt is the time the task was processed successfully in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
//
// Use zero to indicate no value.
CompletedAt int64
}

// EncodeMessage marshals the given task message and returns the encoded bytes.
@@ -240,7 +257,7 @@ func EncodeMessage(msg *TaskMessage) ([]byte, error) {
return proto.Marshal(&pb.TaskMessage{
Type: msg.Type,
Payload: msg.Payload,
Id: msg.ID.String(),
Id: msg.ID,
Queue: msg.Queue,
Retry: int32(msg.Retry),
Retried: int32(msg.Retried),
@@ -249,6 +266,8 @@ func EncodeMessage(msg *TaskMessage) ([]byte, error) {
Timeout: msg.Timeout,
Deadline: msg.Deadline,
UniqueKey: msg.UniqueKey,
Retention: msg.Retention,
CompletedAt: msg.CompletedAt,
})
}

@@ -261,7 +280,7 @@ func DecodeMessage(data []byte) (*TaskMessage, error) {
return &TaskMessage{
Type: pbmsg.GetType(),
Payload: pbmsg.GetPayload(),
ID: uuid.MustParse(pbmsg.GetId()),
ID: pbmsg.GetId(),
Queue: pbmsg.GetQueue(),
Retry: int(pbmsg.GetRetry()),
Retried: int(pbmsg.GetRetried()),
@@ -270,6 +289,8 @@ func DecodeMessage(data []byte) (*TaskMessage, error) {
Timeout: pbmsg.GetTimeout(),
Deadline: pbmsg.GetDeadline(),
UniqueKey: pbmsg.GetUniqueKey(),
Retention: pbmsg.GetRetention(),
CompletedAt: pbmsg.GetCompletedAt(),
}, nil
}
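A round-trip sketch for the two new fields, using the functions above (field values are arbitrary):

msg := &base.TaskMessage{
	ID:        uuid.NewString(),
	Type:      "task1",
	Queue:     "default",
	Retention: 3600, // keep for an hour after completion
}
data, err := base.EncodeMessage(msg)
if err != nil {
	log.Fatal(err)
}
decoded, err := base.DecodeMessage(data)
if err != nil {
	log.Fatal(err)
}
// decoded.Retention == 3600 and decoded.CompletedAt == 0 (not completed yet).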
@@ -278,6 +299,7 @@ type TaskInfo struct {
Message *TaskMessage
State TaskState
NextProcessAt time.Time
Result []byte
}

// Z represents sorted set member.
@@ -638,20 +660,23 @@ func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
// See rdb.RDB as a reference implementation.
type Broker interface {
Ping() error
Enqueue(msg *TaskMessage) error
EnqueueUnique(msg *TaskMessage, ttl time.Duration) error
Enqueue(ctx context.Context, msg *TaskMessage) error
EnqueueUnique(ctx context.Context, msg *TaskMessage, ttl time.Duration) error
Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
Done(msg *TaskMessage) error
MarkAsComplete(msg *TaskMessage) error
Requeue(msg *TaskMessage) error
Schedule(msg *TaskMessage, processAt time.Time) error
ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
Retry(msg *TaskMessage, processAt time.Time, errMsg string) error
Schedule(ctx context.Context, msg *TaskMessage, processAt time.Time) error
ScheduleUnique(ctx context.Context, msg *TaskMessage, processAt time.Time, ttl time.Duration) error
Retry(msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
Archive(msg *TaskMessage, errMsg string) error
ForwardIfReady(qnames ...string) error
DeleteExpiredCompletedTasks(qname string) error
ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
ClearServerState(host string, pid int, serverID string) error
CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
PublishCancelation(id string) error
WriteResult(qname, id string, data []byte) (n int, err error)
Close() error
}
internal/base/base_test.go

@@ -139,6 +139,23 @@ func TestArchivedKey(t *testing.T) {
}
}

func TestCompletedKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:completed"},
{"custom", "asynq:{custom}:completed"},
}

for _, tc := range tests {
got := CompletedKey(tc.qname)
if got != tc.want {
t.Errorf("CompletedKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}

func TestPausedKey(t *testing.T) {
tests := []struct {
qname string
@@ -344,31 +361,33 @@ func TestUniqueKey(t *testing.T) {
}

func TestMessageEncoding(t *testing.T) {
id := uuid.New()
id := uuid.NewString()
tests := []struct {
in *TaskMessage
out *TaskMessage
}{
{
in: &TaskMessage{
Type: "task1",
Payload: toBytes(map[string]interface{}{"a": 1, "b": "hello!", "c": true}),
ID: id,
Queue: "default",
Retry: 10,
Retried: 0,
Timeout: 1800,
Deadline: 1692311100,
Type: "task1",
Payload: toBytes(map[string]interface{}{"a": 1, "b": "hello!", "c": true}),
ID: id,
Queue: "default",
Retry: 10,
Retried: 0,
Timeout: 1800,
Deadline: 1692311100,
Retention: 3600,
},
out: &TaskMessage{
Type: "task1",
Payload: toBytes(map[string]interface{}{"a": json.Number("1"), "b": "hello!", "c": true}),
ID: id,
Queue: "default",
Retry: 10,
Retried: 0,
Timeout: 1800,
Deadline: 1692311100,
Type: "task1",
Payload: toBytes(map[string]interface{}{"a": json.Number("1"), "b": "hello!", "c": true}),
ID: id,
Queue: "default",
Retry: 10,
Retried: 0,
Timeout: 1800,
Deadline: 1692311100,
Retention: 3600,
},
},
}
internal/context/context.go (new file, 87 lines)
@@ -0,0 +1,87 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package context

import (
"context"
"time"

"github.com/hibiken/asynq/internal/base"
)

// A taskMetadata holds task scoped data to put in context.
type taskMetadata struct {
id string
maxRetry int
retryCount int
qname string
}

// ctxKey type is unexported to prevent collisions with context keys defined in
// other packages.
type ctxKey int

// metadataCtxKey is the context key for the task metadata.
// Its value of zero is arbitrary.
const metadataCtxKey ctxKey = 0

// New returns a context and cancel function for a given task message.
func New(msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
metadata := taskMetadata{
id: msg.ID,
maxRetry: msg.Retry,
retryCount: msg.Retried,
qname: msg.Queue,
}
ctx := context.WithValue(context.Background(), metadataCtxKey, metadata)
return context.WithDeadline(ctx, deadline)
}

// GetTaskID extracts a task ID from a context, if any.
//
// ID of a task is guaranteed to be unique.
// ID of a task doesn't change if the task is being retried.
func GetTaskID(ctx context.Context) (id string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return "", false
}
return metadata.id, true
}

// GetRetryCount extracts retry count from a context, if any.
//
// Return value n indicates the number of times associated task has been
// retried so far.
func GetRetryCount(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return 0, false
}
return metadata.retryCount, true
}

// GetMaxRetry extracts maximum retry from a context, if any.
//
// Return value n indicates the maximum number of times the associated task
// can be retried if ProcessTask returns a non-nil error.
func GetMaxRetry(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return 0, false
}
return metadata.maxRetry, true
}

// GetQueueName extracts queue name from a context, if any.
//
// Return value qname indicates which queue the task was pulled from.
func GetQueueName(ctx context.Context) (qname string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return "", false
}
return metadata.qname, true
}
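Handlers reach this metadata through the accessors the top-level asynq package exposes (GetTaskID and GetRetryCount predate this refactor and are assumed to keep their signatures); a minimal sketch:

func handleWelcomeEmail(ctx context.Context, t *asynq.Task) error {
	id, _ := asynq.GetTaskID(ctx)
	n, _ := asynq.GetRetryCount(ctx)
	log.Printf("processing task %s (attempt %d)", id, n+1)
	return nil
}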
@@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a MIT license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
package asynq
|
||||
package context
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -24,12 +24,11 @@ func TestCreateContextWithFutureDeadline(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
msg := &base.TaskMessage{
|
||||
Type: "something",
|
||||
ID: uuid.New(),
|
||||
ID: uuid.NewString(),
|
||||
Payload: nil,
|
||||
}
|
||||
|
||||
ctx, cancel := createContext(msg, tc.deadline)
|
||||
|
||||
ctx, cancel := New(msg, tc.deadline)
|
||||
select {
|
||||
case x := <-ctx.Done():
|
||||
t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
|
||||
@@ -64,11 +63,11 @@ func TestCreateContextWithPastDeadline(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
msg := &base.TaskMessage{
|
||||
Type: "something",
|
||||
ID: uuid.New(),
|
||||
ID: uuid.NewString(),
|
||||
Payload: nil,
|
||||
}
|
||||
|
||||
ctx, cancel := createContext(msg, tc.deadline)
|
||||
ctx, cancel := New(msg, tc.deadline)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
@@ -92,21 +91,21 @@ func TestGetTaskMetadataFromContext(t *testing.T) {
|
||||
desc string
|
||||
msg *base.TaskMessage
|
||||
}{
|
||||
{"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}},
|
||||
{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}},
|
||||
{"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}},
|
||||
{"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}},
|
||||
{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}},
|
||||
{"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
ctx, cancel := createContext(tc.msg, time.Now().Add(30*time.Minute))
|
||||
ctx, cancel := New(tc.msg, time.Now().Add(30*time.Minute))
|
||||
defer cancel()
|
||||
|
||||
id, ok := GetTaskID(ctx)
|
||||
if !ok {
|
||||
t.Errorf("%s: GetTaskID(ctx) returned ok == false", tc.desc)
|
||||
}
|
||||
if ok && id != tc.msg.ID.String() {
|
||||
t.Errorf("%s: GetTaskID(ctx) returned id == %q, want %q", tc.desc, id, tc.msg.ID.String())
|
||||
if ok && id != tc.msg.ID {
|
||||
t.Errorf("%s: GetTaskID(ctx) returned id == %q, want %q", tc.desc, id, tc.msg.ID)
|
||||
}
|
||||
|
||||
retried, ok := GetRetryCount(ctx)

@@ -170,6 +170,9 @@ var (

	// ErrDuplicateTask indicates that another task with the same unique key holds the uniqueness lock.
	ErrDuplicateTask = errors.New("task already exists")

	// ErrTaskIdConflict indicates that another task with the same task ID already exists.
	ErrTaskIdConflict = errors.New("task id conflicts with another task")
)
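A hedged sketch of how a caller might react to this sentinel when enqueueing with an explicit task ID (the public Client API call and the order ID are illustrative):

info, err := client.Enqueue(task, asynq.TaskID("order-1234"))
switch {
case errors.Is(err, asynq.ErrTaskIDConflict):
	log.Println("a task with this ID already exists; skipping")
case err != nil:
	log.Fatalf("enqueue failed: %v", err)
default:
	log.Printf("enqueued task id=%s", info.ID)
}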

// TaskNotFoundError indicates that a task with the given ID does not exist
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.25.0
// 	protoc        v3.14.0
// 	protoc        v3.17.3
// source: asynq.proto

package proto
@@ -65,6 +65,14 @@ type TaskMessage struct {
	// UniqueKey holds the redis key used for uniqueness lock for this task.
	// Empty string indicates that no uniqueness lock was used.
	UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"`
	// Retention period specified in a number of seconds.
	// The task will be stored in redis as a completed task until the TTL
	// expires.
	Retention int64 `protobuf:"varint,12,opt,name=retention,proto3" json:"retention,omitempty"`
	// Time when the task completed successfully, in Unix time,
	// the number of seconds elapsed since January 1, 1970 UTC.
	// This field is populated if result_ttl > 0 upon completion.
	CompletedAt int64 `protobuf:"varint,13,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"`
}

func (x *TaskMessage) Reset() {
@@ -176,6 +184,20 @@ func (x *TaskMessage) GetUniqueKey() string {
	return ""
}

func (x *TaskMessage) GetRetention() int64 {
	if x != nil {
		return x.Retention
	}
	return 0
}

func (x *TaskMessage) GetCompletedAt() int64 {
	if x != nil {
		return x.CompletedAt
	}
	return 0
}

// ServerInfo holds information about a running server.
type ServerInfo struct {
	state protoimpl.MessageState
@@ -592,7 +614,7 @@ var file_asynq_proto_rawDesc = []byte{
	// (regenerated raw descriptor bytes for asynq.proto; machine-generated
	// hex encoding of the schema, now including the retention and
	// completed_at fields on TaskMessage)
}

var (

@@ -51,6 +51,15 @@ message TaskMessage {
  // Empty string indicates that no uniqueness lock was used.
  string unique_key = 10;

  // Retention period specified in a number of seconds.
  // The task will be stored in redis as a completed task until the TTL
  // expires.
  int64 retention = 12;

  // Time when the task completed successfully, in Unix time,
  // the number of seconds elapsed since January 1, 1970 UTC.
  // This field is populated if result_ttl > 0 upon completion.
  int64 completed_at = 13;
};
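The retention field translates into a plain TTL on the completed task's Redis hash. A rough sketch of the relationship, assuming a cleanup step on the completion path that is not part of this hunk:

// completed_at and retention together bound the task's lifetime in redis:
expireAt := time.Unix(msg.CompletedAt, 0).Add(time.Duration(msg.Retention) * time.Second)
ttl := time.Until(expireAt) // TTL a completion/janitor step could apply to the task key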

// ServerInfo holds information about a running server.
@@ -151,4 +160,4 @@ message SchedulerEnqueueEvent {

  // Time the task was enqueued.
  google.protobuf.Timestamp enqueue_time = 2;
};
};
@@ -5,6 +5,7 @@
package rdb

import (
	"context"
	"fmt"
	"testing"
	"time"
@@ -15,6 +16,7 @@ import (

func BenchmarkEnqueue(b *testing.B) {
	r := setup(b)
	ctx := context.Background()
	msg := asynqtest.NewTaskMessage("task1", nil)
	b.ResetTimer()

@@ -23,7 +25,7 @@ func BenchmarkEnqueue(b *testing.B) {
		asynqtest.FlushDB(b, r.client)
		b.StartTimer()

		if err := r.Enqueue(msg); err != nil {
		if err := r.Enqueue(ctx, msg); err != nil {
			b.Fatalf("Enqueue failed: %v", err)
		}
	}
@@ -31,6 +33,7 @@ func BenchmarkEnqueue(b *testing.B) {

func BenchmarkEnqueueUnique(b *testing.B) {
	r := setup(b)
	ctx := context.Background()
	msg := &base.TaskMessage{
		Type:    "task1",
		Payload: nil,
@@ -45,7 +48,7 @@ func BenchmarkEnqueueUnique(b *testing.B) {
		asynqtest.FlushDB(b, r.client)
		b.StartTimer()

		if err := r.EnqueueUnique(msg, uniqueTTL); err != nil {
		if err := r.EnqueueUnique(ctx, msg, uniqueTTL); err != nil {
			b.Fatalf("EnqueueUnique failed: %v", err)
		}
	}
@@ -53,6 +56,7 @@ func BenchmarkEnqueueUnique(b *testing.B) {

func BenchmarkSchedule(b *testing.B) {
	r := setup(b)
	ctx := context.Background()
	msg := asynqtest.NewTaskMessage("task1", nil)
	processAt := time.Now().Add(3 * time.Minute)
	b.ResetTimer()
@@ -62,7 +66,7 @@ func BenchmarkSchedule(b *testing.B) {
		asynqtest.FlushDB(b, r.client)
		b.StartTimer()

		if err := r.Schedule(msg, processAt); err != nil {
		if err := r.Schedule(ctx, msg, processAt); err != nil {
			b.Fatalf("Schedule failed: %v", err)
		}
	}
@@ -70,6 +74,7 @@ func BenchmarkSchedule(b *testing.B) {

func BenchmarkScheduleUnique(b *testing.B) {
	r := setup(b)
	ctx := context.Background()
	msg := &base.TaskMessage{
		Type:    "task1",
		Payload: nil,
@@ -85,7 +90,7 @@ func BenchmarkScheduleUnique(b *testing.B) {
		asynqtest.FlushDB(b, r.client)
		b.StartTimer()

		if err := r.ScheduleUnique(msg, processAt, uniqueTTL); err != nil {
		if err := r.ScheduleUnique(ctx, msg, processAt, uniqueTTL); err != nil {
			b.Fatalf("EnqueueUnique failed: %v", err)
		}
	}
@@ -93,6 +98,7 @@ func BenchmarkScheduleUnique(b *testing.B) {

func BenchmarkDequeueSingleQueue(b *testing.B) {
	r := setup(b)
	ctx := context.Background()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
@@ -101,7 +107,7 @@ func BenchmarkDequeueSingleQueue(b *testing.B) {
		for i := 0; i < 10; i++ {
			m := asynqtest.NewTaskMessageWithQueue(
				fmt.Sprintf("task%d", i), nil, base.DefaultQueueName)
			if err := r.Enqueue(m); err != nil {
			if err := r.Enqueue(ctx, m); err != nil {
				b.Fatalf("Enqueue failed: %v", err)
			}
		}
@@ -116,6 +122,7 @@ func BenchmarkDequeueSingleQueue(b *testing.B) {
func BenchmarkDequeueMultipleQueues(b *testing.B) {
	qnames := []string{"critical", "default", "low"}
	r := setup(b)
	ctx := context.Background()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
@@ -125,7 +132,7 @@ func BenchmarkDequeueMultipleQueues(b *testing.B) {
		for _, qname := range qnames {
			m := asynqtest.NewTaskMessageWithQueue(
				fmt.Sprintf("%s_task%d", qname, i), nil, qname)
			if err := r.Enqueue(m); err != nil {
			if err := r.Enqueue(ctx, m); err != nil {
				b.Fatalf("Enqueue failed: %v", err)
			}
		}
@@ -184,7 +191,7 @@ func BenchmarkRetry(b *testing.B) {
		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
		b.StartTimer()

		if err := r.Retry(msgs[0], time.Now().Add(1*time.Minute), "error"); err != nil {
		if err := r.Retry(msgs[0], time.Now().Add(1*time.Minute), "error", true /*isFailure*/); err != nil {
			b.Fatalf("Retry failed: %v", err)
		}
	}
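Retry now takes a fourth argument, isFailure, which presumably lets callers requeue a task without counting the attempt as a failure. A hedged sketch of the two call shapes (backoff is a hypothetical helper):

// Handler returned an error: count it against Retried and failure stats.
err := r.Retry(msg, time.Now().Add(backoff(msg.Retried)), "handler error", true)

// Requeue for a non-failure reason (e.g. shutdown): don't count it.
err = r.Retry(msg, time.Now(), "", false)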

@@ -5,12 +5,12 @@
package rdb

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/go-redis/redis/v7"
	"github.com/google/uuid"
	"github.com/go-redis/redis/v8"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/errors"
	"github.com/spf13/cast"
@@ -18,7 +18,7 @@ import (

// AllQueues returns a list of all queue names.
func (r *RDB) AllQueues() ([]string, error) {
	return r.client.SMembers(base.AllQueues).Result()
	return r.client.SMembers(context.Background(), base.AllQueues).Result()
}

// Stats represents the state of the queues at a certain time.
@@ -40,11 +40,14 @@ type Stats struct {
	Scheduled int
	Retry     int
	Archived  int
	Completed int
	// Total number of tasks processed during the current date.
	// The number includes both succeeded and failed tasks.
	Processed int
	// Total number of tasks failed during the current date.
	Failed int
	// Latency of the queue, measured by the oldest pending task in the queue.
	Latency time.Duration
	// Time these stats were taken.
	Timestamp time.Time
}
@@ -62,18 +65,22 @@ type DailyStats struct {
	Time time.Time
}

// KEYS[1] -> asynq:<qname>
// KEYS[1] -> asynq:<qname>:pending
// KEYS[2] -> asynq:<qname>:active
// KEYS[3] -> asynq:<qname>:scheduled
// KEYS[4] -> asynq:<qname>:retry
// KEYS[5] -> asynq:<qname>:archived
// KEYS[6] -> asynq:<qname>:processed:<yyyy-mm-dd>
// KEYS[7] -> asynq:<qname>:failed:<yyyy-mm-dd>
// KEYS[8] -> asynq:<qname>:paused
// KEYS[6] -> asynq:<qname>:completed
// KEYS[7] -> asynq:<qname>:processed:<yyyy-mm-dd>
// KEYS[8] -> asynq:<qname>:failed:<yyyy-mm-dd>
// KEYS[9] -> asynq:<qname>:paused
//
// ARGV[1] -> task key prefix
var currentStatsCmd = redis.NewScript(`
local res = {}
local pendingTaskCount = redis.call("LLEN", KEYS[1])
table.insert(res, KEYS[1])
table.insert(res, redis.call("LLEN", KEYS[1]))
table.insert(res, pendingTaskCount)
table.insert(res, KEYS[2])
table.insert(res, redis.call("LLEN", KEYS[2]))
table.insert(res, KEYS[3])
@@ -82,28 +89,37 @@ table.insert(res, KEYS[4])
table.insert(res, redis.call("ZCARD", KEYS[4]))
table.insert(res, KEYS[5])
table.insert(res, redis.call("ZCARD", KEYS[5]))
table.insert(res, KEYS[6])
table.insert(res, redis.call("ZCARD", KEYS[6]))
local pcount = 0
local p = redis.call("GET", KEYS[6])
local p = redis.call("GET", KEYS[7])
if p then
	pcount = tonumber(p)
end
table.insert(res, KEYS[6])
table.insert(res, KEYS[7])
table.insert(res, pcount)
local fcount = 0
local f = redis.call("GET", KEYS[7])
local f = redis.call("GET", KEYS[8])
if f then
	fcount = tonumber(f)
end
table.insert(res, KEYS[7])
table.insert(res, fcount)
table.insert(res, KEYS[8])
table.insert(res, redis.call("EXISTS", KEYS[8]))
table.insert(res, fcount)
table.insert(res, KEYS[9])
table.insert(res, redis.call("EXISTS", KEYS[9]))
table.insert(res, "oldest_pending_since")
if pendingTaskCount > 0 then
	local id = redis.call("LRANGE", KEYS[1], -1, -1)[1]
	table.insert(res, redis.call("HGET", ARGV[1] .. id, "pending_since"))
else
	table.insert(res, 0)
end
return res`)
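The script's final element is the raw pending_since value (Unix time in nanoseconds) of the oldest pending task, or 0 when the queue is empty; CurrentStats below turns it into a latency. The conversion, in isolation:

// val is pending_since of the task at the tail of the pending list.
// Latency is how long the head of the queue has been waiting.
latency := time.Since(time.Unix(0, int64(val)))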

// CurrentStats returns the current state of the queues.
func (r *RDB) CurrentStats(qname string) (*Stats, error) {
	var op errors.Op = "rdb.CurrentStats"
	exists, err := r.client.SIsMember(base.AllQueues, qname).Result()
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, err)
	}
@@ -111,16 +127,17 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	now := time.Now()
	res, err := currentStatsCmd.Run(r.client, []string{
	res, err := currentStatsCmd.Run(context.Background(), r.client, []string{
		base.PendingKey(qname),
		base.ActiveKey(qname),
		base.ScheduledKey(qname),
		base.RetryKey(qname),
		base.ArchivedKey(qname),
		base.CompletedKey(qname),
		base.ProcessedKey(qname, now),
		base.FailedKey(qname, now),
		base.PausedKey(qname),
	}).Result()
	}, base.TaskKeyPrefix(qname)).Result()
	if err != nil {
		return nil, errors.E(op, errors.Unknown, err)
	}
@@ -152,6 +169,9 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
		case base.ArchivedKey(qname):
			stats.Archived = val
			size += val
		case base.CompletedKey(qname):
			stats.Completed = val
			size += val
		case base.ProcessedKey(qname, now):
			stats.Processed = val
		case base.FailedKey(qname, now):
@@ -162,6 +182,12 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
			} else {
				stats.Paused = true
			}
		case "oldest_pending_since":
			if val == 0 {
				stats.Latency = 0
			} else {
				stats.Latency = r.clock.Now().Sub(time.Unix(0, int64(val)))
			}
		}
	}
	stats.Size = size

@@ -182,6 +208,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
// KEYS[3] -> asynq:{qname}:scheduled
// KEYS[4] -> asynq:{qname}:retry
// KEYS[5] -> asynq:{qname}:archived
// KEYS[6] -> asynq:{qname}:completed
//
// ARGV[1] -> asynq:{qname}:t:
// ARGV[2] -> sample_size (e.g. 20)
@@ -208,7 +235,7 @@ for i=1,2 do
		memusg = memusg + m
	end
end
for i=3,5 do
for i=3,6 do
	local ids = redis.call("ZRANGE", KEYS[i], 0, sample_size - 1)
	local sample_total = 0
	if (table.getn(ids) > 0) then
@@ -237,12 +264,13 @@ func (r *RDB) memoryUsage(qname string) (int64, error) {
		base.ScheduledKey(qname),
		base.RetryKey(qname),
		base.ArchivedKey(qname),
		base.CompletedKey(qname),
	}
	argv := []interface{}{
		base.TaskKeyPrefix(qname),
		sampleSize,
	}
	res, err := memoryUsageCmd.Run(r.client, keys, argv...).Result()
	res, err := memoryUsageCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return 0, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
	}
@@ -270,7 +298,7 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
	if n < 1 {
		return nil, errors.E(op, errors.FailedPrecondition, "the number of days must be positive")
	}
	exists, err := r.client.SIsMember(base.AllQueues, qname).Result()
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
@@ -287,7 +315,7 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
		keys = append(keys, base.ProcessedKey(qname, ts))
		keys = append(keys, base.FailedKey(qname, ts))
	}
	res, err := historicalStatsCmd.Run(r.client, keys).Result()
	res, err := historicalStatsCmd.Run(context.Background(), r.client, keys).Result()
	if err != nil {
		return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
	}
@@ -309,7 +337,7 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {

// RedisInfo returns a map of redis info.
func (r *RDB) RedisInfo() (map[string]string, error) {
	res, err := r.client.Info().Result()
	res, err := r.client.Info(context.Background()).Result()
	if err != nil {
		return nil, err
	}
@@ -318,7 +346,7 @@ func (r *RDB) RedisInfo() (map[string]string, error) {

// RedisClusterInfo returns a map of redis cluster info.
func (r *RDB) RedisClusterInfo() (map[string]string, error) {
	res, err := r.client.ClusterInfo().Result()
	res, err := r.client.ClusterInfo(context.Background()).Result()
	if err != nil {
		return nil, err
	}
@@ -337,7 +365,8 @@ func parseInfo(infoStr string) (map[string]string, error) {
	return info, nil
}

func reverse(x []string) {
// TODO: Use generics once available.
func reverse(x []*base.TaskInfo) {
	for i := len(x)/2 - 1; i >= 0; i-- {
		opp := len(x) - 1 - i
		x[i], x[opp] = x[opp], x[i]
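The TODO above points at Go 1.18 type parameters; a minimal sketch of that generic form, shown only as an illustration (the module still targets earlier Go versions in this diff):

func reverseSlice[T any](x []T) {
	for i := len(x)/2 - 1; i >= 0; i-- {
		opp := len(x) - 1 - i
		x[i], x[opp] = x[opp], x[i]
	}
}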

@@ -347,7 +376,7 @@ func reverse(x []string) {
// checkQueueExists verifies whether the queue exists.
// It returns QueueNotFoundError if the queue doesn't exist.
func (r *RDB) checkQueueExists(qname string) error {
	exists, err := r.client.SIsMember(base.AllQueues, qname).Result()
	exists, err := r.queueExists(qname)
	if err != nil {
		return errors.E(errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
@@ -364,42 +393,43 @@ func (r *RDB) checkQueueExists(qname string) error {
// ARGV[3] -> queue key prefix (asynq:{<qname>}:)
//
// Output:
// Tuple of {msg, state, nextProcessAt}
// Tuple of {msg, state, nextProcessAt, result}
// msg: encoded task message
// state: string describing the state of the task
// nextProcessAt: unix time in seconds, zero if not applicable.
// result: result data associated with the task
//
// If the task key doesn't exist, it returns an error with the message "NOT FOUND"
var getTaskInfoCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[1]) == 0 then
	return redis.error_reply("NOT FOUND")
end
local msg, state = unpack(redis.call("HMGET", KEYS[1], "msg", "state"))
local msg, state, result = unpack(redis.call("HMGET", KEYS[1], "msg", "state", "result"))
if state == "scheduled" or state == "retry" then
	return {msg, state, redis.call("ZSCORE", ARGV[3] .. state, ARGV[1])}
	return {msg, state, redis.call("ZSCORE", ARGV[3] .. state, ARGV[1]), result}
end
if state == "pending" then
	return {msg, state, ARGV[2]}
	return {msg, state, ARGV[2], result}
end
return {msg, state, 0}
return {msg, state, 0, result}
`)

// GetTaskInfo returns a TaskInfo describing the task from the given queue.
func (r *RDB) GetTaskInfo(qname string, id uuid.UUID) (*base.TaskInfo, error) {
func (r *RDB) GetTaskInfo(qname, id string) (*base.TaskInfo, error) {
	var op errors.Op = "rdb.GetTaskInfo"
	if err := r.checkQueueExists(qname); err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	keys := []string{base.TaskKey(qname, id.String())}
	keys := []string{base.TaskKey(qname, id)}
	argv := []interface{}{
		id.String(),
		id,
		time.Now().Unix(),
		base.QueueKeyPrefix(qname),
	}
	res, err := getTaskInfoCmd.Run(r.client, keys, argv...).Result()
	res, err := getTaskInfoCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		if err.Error() == "NOT FOUND" {
			return nil, errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id.String()})
			return nil, errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id})
		}
		return nil, errors.E(op, errors.Unknown, err)
	}
@@ -407,7 +437,7 @@ func (r *RDB) GetTaskInfo(qname string, id uuid.UUID) (*base.TaskInfo, error) {
	if err != nil {
		return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
	}
	if len(vals) != 3 {
	if len(vals) != 4 {
		return nil, errors.E(op, errors.Internal, "unexpected number of values returned from Lua script")
	}
	encoded, err := cast.ToStringE(vals[0])
@@ -422,6 +452,10 @@ func (r *RDB) GetTaskInfo(qname string, id uuid.UUID) (*base.TaskInfo, error) {
	if err != nil {
		return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
	}
	resultStr, err := cast.ToStringE(vals[3])
	if err != nil {
		return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
	}
	msg, err := base.DecodeMessage([]byte(encoded))
	if err != nil {
		return nil, errors.E(op, errors.Internal, "could not decode task message")
@@ -434,10 +468,15 @@ func (r *RDB) GetTaskInfo(qname string, id uuid.UUID) (*base.TaskInfo, error) {
	if processAtUnix != 0 {
		nextProcessAt = time.Unix(processAtUnix, 0)
	}
	var result []byte
	if len(resultStr) > 0 {
		result = []byte(resultStr)
	}
	return &base.TaskInfo{
		Message:       msg,
		State:         state,
		NextProcessAt: nextProcessAt,
		Result:        result,
	}, nil
}
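A hedged sketch of reaching this code path through the public Inspector API, which wraps GetTaskInfo (the address and task ID are illustrative):

insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
info, err := insp.GetTaskInfo("default", "d9d83558-ceeb-4d21-8ef9-3f2b23129954")
if err != nil {
	log.Fatal(err)
}
// Result carries whatever the handler wrote via ResultWriter while the
// completed task is still within its retention window.
log.Printf("state=%v result=%q", info.State, info.Result)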

@@ -460,12 +499,16 @@ func (p Pagination) stop() int64 {
}

// ListPending returns pending tasks that are ready to be processed.
func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, error) {
func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListPending"
	if !r.client.SIsMember(base.AllQueues, qname).Val() {
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	res, err := r.listMessages(base.PendingKey(qname), qname, pgn)
	res, err := r.listMessages(qname, base.TaskStatePending, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
@@ -473,12 +516,16 @@ func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, er
}

// ListActive returns all tasks that are currently being processed for the given queue.
func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskMessage, error) {
func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListActive"
	if !r.client.SIsMember(base.AllQueues, qname).Val() {
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	res, err := r.listMessages(base.ActiveKey(qname), qname, pgn)
	res, err := r.listMessages(qname, base.TaskStateActive, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
@@ -491,21 +538,32 @@ func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskMessage, err
// ARGV[3] -> task key prefix
var listMessagesCmd = redis.NewScript(`
local ids = redis.call("LRange", KEYS[1], ARGV[1], ARGV[2])
local res = {}
local data = {}
for _, id in ipairs(ids) do
	local key = ARGV[3] .. id
	table.insert(res, redis.call("HGET", key, "msg"))
	local msg, result = unpack(redis.call("HMGET", key, "msg","result"))
	table.insert(data, msg)
	table.insert(data, result)
end
return res
return data
`)

// listMessages returns a list of TaskMessage in Redis list with the given key.
func (r *RDB) listMessages(key, qname string, pgn Pagination) ([]*base.TaskMessage, error) {
// listMessages returns a list of TaskInfo in Redis list with the given key.
func (r *RDB) listMessages(qname string, state base.TaskState, pgn Pagination) ([]*base.TaskInfo, error) {
	var key string
	switch state {
	case base.TaskStateActive:
		key = base.ActiveKey(qname)
	case base.TaskStatePending:
		key = base.PendingKey(qname)
	default:
		panic(fmt.Sprintf("unsupported task state: %v", state))
	}
	// Note: Because we use LPUSH to redis list, we need to calculate the
	// correct range and reverse the list to get the tasks with pagination.
	stop := -pgn.start() - 1
	start := -pgn.stop() - 1
	res, err := listMessagesCmd.Run(r.client,
	res, err := listMessagesCmd.Run(context.Background(), r.client,
		[]string{key}, start, stop, base.TaskKeyPrefix(qname)).Result()
	if err != nil {
		return nil, errors.E(errors.Unknown, err)
@@ -514,27 +572,44 @@ func (r *RDB) listMessages(key, qname string, pgn Pagination) ([]*base.TaskMessa
	if err != nil {
		return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
	}
	reverse(data)
	var msgs []*base.TaskMessage
	for _, s := range data {
		m, err := base.DecodeMessage([]byte(s))
	var infos []*base.TaskInfo
	for i := 0; i < len(data); i += 2 {
		m, err := base.DecodeMessage([]byte(data[i]))
		if err != nil {
			continue // bad data, ignore and continue
		}
		msgs = append(msgs, m)
		var res []byte
		if len(data[i+1]) > 0 {
			res = []byte(data[i+1])
		}
		var nextProcessAt time.Time
		if state == base.TaskStatePending {
			nextProcessAt = time.Now()
		}
		infos = append(infos, &base.TaskInfo{
			Message:       m,
			State:         state,
			NextProcessAt: nextProcessAt,
			Result:        res,
		})
	}
	return msgs, nil
	reverse(infos)
	return infos, nil

}
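To make the negative-index arithmetic concrete, a worked example:

// With Pagination{Size: 2, Page: 0}: pgn.start() == 0, pgn.stop() == 1, so
//   start = -pgn.stop() - 1 = -2
//   stop  = -pgn.start() - 1 = -1
// LRANGE key -2 -1 returns the two items at the tail of the list. Since
// producers LPUSH, those are the two oldest tasks, and reverse() restores
// oldest-first order for the returned page.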

// ListScheduled returns all tasks from the given queue that are scheduled
// to be processed in the future.
func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]base.Z, error) {
func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListScheduled"
	if !r.client.SIsMember(base.AllQueues, qname).Val() {
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	res, err := r.listZSetEntries(base.ScheduledKey(qname), qname, pgn)
	res, err := r.listZSetEntries(qname, base.TaskStateScheduled, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
@@ -543,12 +618,16 @@ func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]base.Z, error) {

// ListRetry returns all tasks from the given queue that have failed before
// and will be retried in the future.
func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) {
func (r *RDB) ListRetry(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListRetry"
	if !r.client.SIsMember(base.AllQueues, qname).Val() {
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	res, err := r.listZSetEntries(base.RetryKey(qname), qname, pgn)
	res, err := r.listZSetEntries(qname, base.TaskStateRetry, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
@@ -556,40 +635,83 @@ func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) {
}

// ListArchived returns all tasks from the given queue that have exhausted their retry limit.
func (r *RDB) ListArchived(qname string, pgn Pagination) ([]base.Z, error) {
func (r *RDB) ListArchived(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListArchived"
	if !r.client.SIsMember(base.AllQueues, qname).Val() {
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	zs, err := r.listZSetEntries(base.ArchivedKey(qname), qname, pgn)
	zs, err := r.listZSetEntries(qname, base.TaskStateArchived, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	return zs, nil
}

// ListCompleted returns all tasks from the given queue that have completed successfully.
func (r *RDB) ListCompleted(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListCompleted"
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	zs, err := r.listZSetEntries(qname, base.TaskStateCompleted, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	return zs, nil
}

// Reports whether a queue with the given name exists.
func (r *RDB) queueExists(qname string) (bool, error) {
	return r.client.SIsMember(context.Background(), base.AllQueues, qname).Result()
}

// KEYS[1] -> key for ids set (e.g. asynq:{<qname>}:scheduled)
// ARGV[1] -> min
// ARGV[2] -> max
// ARGV[3] -> task key prefix
//
// Returns an array populated with
// [msg1, score1, msg2, score2, ..., msgN, scoreN]
// [msg1, score1, result1, msg2, score2, result2, ..., msgN, scoreN, resultN]
var listZSetEntriesCmd = redis.NewScript(`
local res = {}
local data = {}
local id_score_pairs = redis.call("ZRANGE", KEYS[1], ARGV[1], ARGV[2], "WITHSCORES")
for i = 1, table.getn(id_score_pairs), 2 do
	local key = ARGV[3] .. id_score_pairs[i]
	table.insert(res, redis.call("HGET", key, "msg"))
	table.insert(res, id_score_pairs[i+1])
	local id = id_score_pairs[i]
	local score = id_score_pairs[i+1]
	local key = ARGV[3] .. id
	local msg, res = unpack(redis.call("HMGET", key, "msg", "result"))
	table.insert(data, msg)
	table.insert(data, score)
	table.insert(data, res)
end
return res
return data
`)

// listZSetEntries returns a list of message and score pairs in Redis sorted-set
// with the given key.
func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, error) {
	res, err := listZSetEntriesCmd.Run(r.client, []string{key},
func (r *RDB) listZSetEntries(qname string, state base.TaskState, pgn Pagination) ([]*base.TaskInfo, error) {
	var key string
	switch state {
	case base.TaskStateScheduled:
		key = base.ScheduledKey(qname)
	case base.TaskStateRetry:
		key = base.RetryKey(qname)
	case base.TaskStateArchived:
		key = base.ArchivedKey(qname)
	case base.TaskStateCompleted:
		key = base.CompletedKey(qname)
	default:
		panic(fmt.Sprintf("unsupported task state: %v", state))
	}
	res, err := listZSetEntriesCmd.Run(context.Background(), r.client, []string{key},
		pgn.start(), pgn.stop(), base.TaskKeyPrefix(qname)).Result()
	if err != nil {
		return nil, errors.E(errors.Unknown, err)
@@ -598,8 +720,8 @@ func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, erro
	if err != nil {
		return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
	}
	var zs []base.Z
	for i := 0; i < len(data); i += 2 {
	var infos []*base.TaskInfo
	for i := 0; i < len(data); i += 3 {
		s, err := cast.ToStringE(data[i])
		if err != nil {
			return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
@@ -608,13 +730,30 @@ func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, erro
		if err != nil {
			return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
		}
		resStr, err := cast.ToStringE(data[i+2])
		if err != nil {
			return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
		}
		msg, err := base.DecodeMessage([]byte(s))
		if err != nil {
			continue // bad data, ignore and continue
		}
		zs = append(zs, base.Z{Message: msg, Score: score})
		var nextProcessAt time.Time
		if state == base.TaskStateScheduled || state == base.TaskStateRetry {
			nextProcessAt = time.Unix(score, 0)
		}
		var resBytes []byte
		if len(resStr) > 0 {
			resBytes = []byte(resStr)
		}
		infos = append(infos, &base.TaskInfo{
			Message:       msg,
			State:         state,
			NextProcessAt: nextProcessAt,
			Result:        resBytes,
		})
	}
	return zs, nil
	return infos, nil
}

// RunAllScheduledTasks enqueues all scheduled tasks from the given queue
@@ -703,20 +842,20 @@ return 1
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
// If a task with the given id doesn't exist in the queue, it returns TaskNotFoundError
// If a task is in active or pending state it returns non-nil error with Code FailedPrecondition.
func (r *RDB) RunTask(qname string, id uuid.UUID) error {
func (r *RDB) RunTask(qname, id string) error {
	var op errors.Op = "rdb.RunTask"
	if err := r.checkQueueExists(qname); err != nil {
		return errors.E(op, errors.CanonicalCode(err), err)
	}
	keys := []string{
		base.TaskKey(qname, id.String()),
		base.TaskKey(qname, id),
		base.PendingKey(qname),
	}
	argv := []interface{}{
		id.String(),
		id,
		base.QueueKeyPrefix(qname),
	}
	res, err := runTaskCmd.Run(r.client, keys, argv...).Result()
	res, err := runTaskCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return errors.E(op, errors.Unknown, err)
	}
@@ -728,7 +867,7 @@ func (r *RDB) RunTask(qname string, id uuid.UUID) error {
	case 1:
		return nil
	case 0:
		return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id.String()})
		return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id})
	case -1:
		return errors.E(op, errors.FailedPrecondition, "task is already running")
	case -2:
@@ -769,7 +908,7 @@ func (r *RDB) runAll(zset, qname string) (int64, error) {
	argv := []interface{}{
		base.TaskKeyPrefix(qname),
	}
	res, err := runAllCmd.Run(r.client, keys, argv...).Result()
	res, err := runAllCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return 0, err
	}
@@ -857,7 +996,7 @@ func (r *RDB) ArchiveAllPendingTasks(qname string) (int64, error) {
		maxArchiveSize,
		base.TaskKeyPrefix(qname),
	}
	res, err := archiveAllPendingCmd.Run(r.client, keys, argv...).Result()
	res, err := archiveAllPendingCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return 0, errors.E(op, errors.Internal, err)
	}
@@ -921,24 +1060,24 @@ return 1
// If a task with the given id doesn't exist in the queue, it returns TaskNotFoundError
// If a task is already archived, it returns TaskAlreadyArchivedError.
// If a task is in active state it returns non-nil error with FailedPrecondition code.
func (r *RDB) ArchiveTask(qname string, id uuid.UUID) error {
func (r *RDB) ArchiveTask(qname, id string) error {
	var op errors.Op = "rdb.ArchiveTask"
	if err := r.checkQueueExists(qname); err != nil {
		return errors.E(op, errors.CanonicalCode(err), err)
	}
	keys := []string{
		base.TaskKey(qname, id.String()),
		base.TaskKey(qname, id),
		base.ArchivedKey(qname),
	}
	now := time.Now()
	argv := []interface{}{
		id.String(),
		id,
		now.Unix(),
		now.AddDate(0, 0, -archivedExpirationInDays).Unix(),
		maxArchiveSize,
		base.QueueKeyPrefix(qname),
	}
	res, err := archiveTaskCmd.Run(r.client, keys, argv...).Result()
	res, err := archiveTaskCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return errors.E(op, errors.Unknown, err)
	}
@@ -950,9 +1089,9 @@ func (r *RDB) ArchiveTask(qname string, id uuid.UUID) error {
	case 1:
		return nil
	case 0:
		return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id.String()})
		return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id})
	case -1:
		return errors.E(op, errors.FailedPrecondition, &errors.TaskAlreadyArchivedError{Queue: qname, ID: id.String()})
		return errors.E(op, errors.FailedPrecondition, &errors.TaskAlreadyArchivedError{Queue: qname, ID: id})
	case -2:
		return errors.E(op, errors.FailedPrecondition, "cannot archive task in active state. use CancelTask instead.")
	case -3:
@@ -1003,7 +1142,7 @@ func (r *RDB) archiveAll(src, dst, qname string) (int64, error) {
		base.TaskKeyPrefix(qname),
		qname,
	}
	res, err := archiveAllCmd.Run(r.client, keys, argv...).Result()
	res, err := archiveAllCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return 0, err
	}
@@ -1058,19 +1197,19 @@ return redis.call("DEL", KEYS[1])
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
// If a task with the given id doesn't exist in the queue, it returns TaskNotFoundError
// If a task is in active state it returns non-nil error with Code FailedPrecondition.
func (r *RDB) DeleteTask(qname string, id uuid.UUID) error {
func (r *RDB) DeleteTask(qname, id string) error {
	var op errors.Op = "rdb.DeleteTask"
	if err := r.checkQueueExists(qname); err != nil {
		return errors.E(op, errors.CanonicalCode(err), err)
	}
	keys := []string{
		base.TaskKey(qname, id.String()),
		base.TaskKey(qname, id),
	}
	argv := []interface{}{
		id.String(),
		id,
		base.QueueKeyPrefix(qname),
	}
	res, err := deleteTaskCmd.Run(r.client, keys, argv...).Result()
	res, err := deleteTaskCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return errors.E(op, errors.Unknown, err)
	}
@@ -1082,7 +1221,7 @@ func (r *RDB) DeleteTask(qname string, id uuid.UUID) error {
	case 1:
		return nil
	case 0:
		return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id.String()})
		return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id})
	case -1:
		return errors.E(op, errors.FailedPrecondition, "cannot delete task in active state. use CancelTask instead.")
	default:
@@ -1132,6 +1271,20 @@ func (r *RDB) DeleteAllScheduledTasks(qname string) (int64, error) {
	return n, nil
}

// DeleteAllCompletedTasks deletes all completed tasks from the given queue
// and returns the number of tasks deleted.
func (r *RDB) DeleteAllCompletedTasks(qname string) (int64, error) {
	var op errors.Op = "rdb.DeleteAllCompletedTasks"
	n, err := r.deleteAll(base.CompletedKey(qname), qname)
	if errors.IsQueueNotFound(err) {
		return 0, errors.E(op, errors.NotFound, err)
	}
	if err != nil {
		return 0, errors.E(op, errors.Unknown, err)
	}
	return n, nil
}
|
||||
|
||||
// deleteAllCmd deletes tasks from the given zset.
|
||||
//
|
||||
// Input:
|
||||
@@ -1162,7 +1315,7 @@ func (r *RDB) deleteAll(key, qname string) (int64, error) {
|
||||
base.TaskKeyPrefix(qname),
|
||||
qname,
|
||||
}
|
||||
res, err := deleteAllCmd.Run(r.client, []string{key}, argv...).Result()
|
||||
res, err := deleteAllCmd.Run(context.Background(), r.client, []string{key}, argv...).Result()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -1203,7 +1356,7 @@ func (r *RDB) DeleteAllPendingTasks(qname string) (int64, error) {
|
||||
argv := []interface{}{
|
||||
base.TaskKeyPrefix(qname),
|
||||
}
|
||||
res, err := deleteAllPendingCmd.Run(r.client, keys, argv...).Result()
|
||||
res, err := deleteAllPendingCmd.Run(context.Background(), r.client, keys, argv...).Result()
|
||||
if err != nil {
|
||||
return 0, errors.E(op, errors.Unknown, err)
|
||||
}
|
||||
@@ -1334,7 +1487,7 @@ return 1`)
|
||||
// the queue is empty.
|
||||
func (r *RDB) RemoveQueue(qname string, force bool) error {
|
||||
var op errors.Op = "rdb.RemoveQueue"
|
||||
exists, err := r.client.SIsMember(base.AllQueues, qname).Result()
|
||||
exists, err := r.queueExists(qname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1355,7 +1508,7 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
|
||||
base.ArchivedKey(qname),
|
||||
base.DeadlinesKey(qname),
|
||||
}
|
||||
res, err := script.Run(r.client, keys, base.TaskKeyPrefix(qname)).Result()
|
||||
res, err := script.Run(context.Background(), r.client, keys, base.TaskKeyPrefix(qname)).Result()
|
||||
if err != nil {
|
||||
return errors.E(op, errors.Unknown, err)
|
||||
}
|
||||
@@ -1365,7 +1518,7 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
|
||||
}
|
||||
switch n {
|
||||
case 1:
|
||||
if err := r.client.SRem(base.AllQueues, qname).Err(); err != nil {
|
||||
if err := r.client.SRem(context.Background(), base.AllQueues, qname).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, err)
|
||||
}
|
||||
return nil
|
||||
@@ -1388,7 +1541,7 @@ return keys`)
|
||||
// ListServers returns the list of server info.
|
||||
func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
|
||||
now := time.Now()
|
||||
res, err := listServerKeysCmd.Run(r.client, []string{base.AllServers}, now.Unix()).Result()
|
||||
res, err := listServerKeysCmd.Run(context.Background(), r.client, []string{base.AllServers}, now.Unix()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1398,7 +1551,7 @@ func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
|
||||
}
|
||||
var servers []*base.ServerInfo
|
||||
for _, key := range keys {
|
||||
data, err := r.client.Get(key).Result()
|
||||
data, err := r.client.Get(context.Background(), key).Result()
|
||||
if err != nil {
|
||||
continue // skip bad data
|
||||
}
|
||||
@@ -1422,7 +1575,7 @@ return keys`)
|
||||
func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
||||
var op errors.Op = "rdb.ListWorkers"
|
||||
now := time.Now()
|
||||
res, err := listWorkersCmd.Run(r.client, []string{base.AllWorkers}, now.Unix()).Result()
|
||||
res, err := listWorkersCmd.Run(context.Background(), r.client, []string{base.AllWorkers}, now.Unix()).Result()
|
||||
if err != nil {
|
||||
return nil, errors.E(op, errors.Unknown, err)
|
||||
}
|
||||
@@ -1432,7 +1585,7 @@ func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
||||
}
|
||||
var workers []*base.WorkerInfo
|
||||
for _, key := range keys {
|
||||
data, err := r.client.HVals(key).Result()
|
||||
data, err := r.client.HVals(context.Background(), key).Result()
|
||||
if err != nil {
|
||||
continue // skip bad data
|
||||
}
|
||||
@@ -1457,7 +1610,7 @@ return keys`)
|
||||
// ListSchedulerEntries returns the list of scheduler entries.
|
||||
func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
|
||||
now := time.Now()
|
||||
res, err := listSchedulerKeysCmd.Run(r.client, []string{base.AllSchedulers}, now.Unix()).Result()
|
||||
res, err := listSchedulerKeysCmd.Run(context.Background(), r.client, []string{base.AllSchedulers}, now.Unix()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1467,7 +1620,7 @@ func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
|
||||
}
|
||||
var entries []*base.SchedulerEntry
|
||||
for _, key := range keys {
|
||||
data, err := r.client.LRange(key, 0, -1).Result()
|
||||
data, err := r.client.LRange(context.Background(), key, 0, -1).Result()
|
||||
if err != nil {
|
||||
continue // skip bad data
|
||||
}
|
||||
@@ -1485,7 +1638,7 @@ func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
|
||||
// ListSchedulerEnqueueEvents returns the list of scheduler enqueue events.
|
||||
func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*base.SchedulerEnqueueEvent, error) {
|
||||
key := base.SchedulerHistoryKey(entryID)
|
||||
zs, err := r.client.ZRevRangeWithScores(key, pgn.start(), pgn.stop()).Result()
|
||||
zs, err := r.client.ZRevRangeWithScores(context.Background(), key, pgn.start(), pgn.stop()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1507,7 +1660,7 @@ func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*bas
|
||||
// Pause pauses processing of tasks from the given queue.
|
||||
func (r *RDB) Pause(qname string) error {
|
||||
key := base.PausedKey(qname)
|
||||
ok, err := r.client.SetNX(key, time.Now().Unix(), 0).Result()
|
||||
ok, err := r.client.SetNX(context.Background(), key, time.Now().Unix(), 0).Result()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1520,7 +1673,7 @@ func (r *RDB) Pause(qname string) error {
|
||||
// Unpause resumes processing of tasks from the given queue.
|
||||
func (r *RDB) Unpause(qname string) error {
|
||||
key := base.PausedKey(qname)
|
||||
deleted, err := r.client.Del(key).Result()
|
||||
deleted, err := r.client.Del(context.Background(), key).Result()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1533,7 +1686,7 @@ func (r *RDB) Unpause(qname string) error {
|
||||
// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
|
||||
func (r *RDB) ClusterKeySlot(qname string) (int64, error) {
|
||||
key := base.PendingKey(qname)
|
||||
return r.client.ClusterKeySlot(key).Result()
|
||||
return r.client.ClusterKeySlot(context.Background(), key).Result()
|
||||
}
|
||||
|
||||
// ClusterNodes returns a list of nodes the given queue belongs to.
|
||||
@@ -1542,7 +1695,7 @@ func (r *RDB) ClusterNodes(qname string) ([]redis.ClusterNode, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clusterSlots, err := r.client.ClusterSlots().Result()
|
||||
clusterSlots, err := r.client.ClusterSlots(context.Background()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
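The hunks above change ArchiveTask and DeleteTask to accept plain string IDs rather than uuid.UUID values. A minimal sketch of a caller after this change (hypothetical helper; the rdb package is internal, so this only compiles from within the asynq module, and the uuid conversion now happens once at the edge instead of at every call site):

```go
package main

import (
	"log"

	"github.com/google/uuid"
	"github.com/hibiken/asynq/internal/rdb"
)

// archiveThenDelete is an illustrative helper, not part of the diff: with the
// new signatures, task IDs flow through as strings end to end.
func archiveThenDelete(r *rdb.RDB) {
	id := uuid.NewString() // convert to string once, up front
	if err := r.ArchiveTask("default", id); err != nil {
		log.Printf("archive failed: %v", err)
	}
	if err := r.DeleteTask("default", id); err != nil {
		log.Printf("delete failed: %v", err)
	}
}
```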
(File diff suppressed because it is too large)
@@ -6,12 +6,14 @@
package rdb

import (
"context"
"fmt"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/timeutil"
"github.com/spf13/cast"
)

@@ -20,11 +22,15 @@ const statsTTL = 90 * 24 * time.Hour // 90 days
// RDB is a client interface to query and mutate task queues.
type RDB struct {
client redis.UniversalClient
clock timeutil.Clock
}

// NewRDB returns a new instance of RDB.
func NewRDB(client redis.UniversalClient) *RDB {
return &RDB{client}
return &RDB{
client: client,
clock: timeutil.NewRealClock(),
}
}

// Close closes the connection with redis server.
@@ -37,18 +43,38 @@ func (r *RDB) Client() redis.UniversalClient {
return r.client
}

// Ping checks the connection with redis server.
func (r *RDB) Ping() error {
return r.client.Ping().Err()
// SetClock sets the clock used by RDB to the given clock.
//
// Use this function to set the clock to SimulatedClock in tests.
func (r *RDB) SetClock(c timeutil.Clock) {
r.clock = c
}

func (r *RDB) runScript(op errors.Op, script *redis.Script, keys []string, args ...interface{}) error {
if err := script.Run(r.client, keys, args...).Err(); err != nil {
// Ping checks the connection with redis server.
func (r *RDB) Ping() error {
return r.client.Ping(context.Background()).Err()
}

func (r *RDB) runScript(ctx context.Context, op errors.Op, script *redis.Script, keys []string, args ...interface{}) error {
if err := script.Run(ctx, r.client, keys, args...).Err(); err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err))
}
return nil
}

// Runs the given script with keys and args and returns the script's return value as int64.
func (r *RDB) runScriptWithErrorCode(ctx context.Context, op errors.Op, script *redis.Script, keys []string, args ...interface{}) (int64, error) {
res, err := script.Run(ctx, r.client, keys, args...).Result()
if err != nil {
return 0, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
}
n, ok := res.(int64)
if !ok {
return 0, errors.E(op, errors.Internal, fmt.Sprintf("unexpected return value from Lua script: %v", res))
}
return n, nil
}
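The v7-to-v8 go-redis migration visible here threads a context.Context through every command, including Script.Run. A self-contained sketch of the same pattern runScriptWithErrorCode uses, with a hypothetical script and key name (integer replies from Lua always come back as int64):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// existsCmd is an illustrative script: it returns 0 if the key exists, 1 otherwise.
var existsCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[1]) == 1 then
	return 0
end
return 1
`)

func check(ctx context.Context, rc redis.UniversalClient, key string) error {
	// Under v8, Run takes ctx as its first argument; v7 did not.
	res, err := existsCmd.Run(ctx, rc, []string{key}).Result()
	if err != nil {
		return fmt.Errorf("redis eval error: %v", err)
	}
	n, ok := res.(int64) // Lua integer replies decode as int64
	if !ok {
		return fmt.Errorf("unexpected return value from Lua script: %v", res)
	}
	if n == 0 {
		return fmt.Errorf("key %q already exists", key)
	}
	return nil
}
```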
// enqueueCmd enqueues a given task message.
//
// Input:
@@ -59,40 +85,54 @@ func (r *RDB) runScript(op errors.Op, script *redis.Script, keys []string, args
// ARGV[2] -> task ID
// ARGV[3] -> task timeout in seconds (0 if not timeout)
// ARGV[4] -> task deadline in unix time (0 if no deadline)
// ARGV[5] -> current unix time in nsec
//
// Output:
// Returns 1 if successfully enqueued
// Returns 0 if task ID already exists
var enqueueCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[1]) == 1 then
return 0
end
redis.call("HSET", KEYS[1],
"msg", ARGV[1],
"state", "pending",
"timeout", ARGV[3],
"deadline", ARGV[4])
"deadline", ARGV[4],
"pending_since", ARGV[5])
redis.call("LPUSH", KEYS[2], ARGV[2])
return 1
`)

// Enqueue adds the given task to the pending list of the queue.
func (r *RDB) Enqueue(msg *base.TaskMessage) error {
func (r *RDB) Enqueue(ctx context.Context, msg *base.TaskMessage) error {
var op errors.Op = "rdb.Enqueue"
encoded, err := base.EncodeMessage(msg)
if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
}
keys := []string{
base.TaskKey(msg.Queue, msg.ID.String()),
base.TaskKey(msg.Queue, msg.ID),
base.PendingKey(msg.Queue),
}
argv := []interface{}{
encoded,
msg.ID.String(),
msg.ID,
msg.Timeout,
msg.Deadline,
r.clock.Now().UnixNano(),
}
return r.runScript(op, enqueueCmd, keys, argv...)
n, err := r.runScriptWithErrorCode(ctx, op, enqueueCmd, keys, argv...)
if err != nil {
return err
}
if n == 0 {
return errors.E(op, errors.AlreadyExists, errors.ErrTaskIdConflict)
}
return nil
}

// enqueueUniqueCmd enqueues the task message if the task is unique.
@@ -106,13 +146,18 @@ func (r *RDB) Enqueue(msg *base.TaskMessage) error {
// ARGV[3] -> task message data
// ARGV[4] -> task timeout in seconds (0 if not timeout)
// ARGV[5] -> task deadline in unix time (0 if no deadline)
// ARGV[6] -> current unix time in nsec
//
// Output:
// Returns 1 if successfully enqueued
// Returns 0 if task already exists
// Returns 0 if task ID conflicts with another task
// Returns -1 if task unique key already exists
var enqueueUniqueCmd = redis.NewScript(`
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
if not ok then
return -1
end
if redis.call("EXISTS", KEYS[2]) == 1 then
return 0
end
redis.call("HSET", KEYS[2],
@@ -120,6 +165,7 @@ redis.call("HSET", KEYS[2],
"state", "pending",
"timeout", ARGV[4],
"deadline", ARGV[5],
"pending_since", ARGV[6],
"unique_key", KEYS[1])
redis.call("LPUSH", KEYS[3], ARGV[1])
return 1
@@ -127,37 +173,37 @@ return 1

// EnqueueUnique inserts the given task if the task's uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired.
func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
func (r *RDB) EnqueueUnique(ctx context.Context, msg *base.TaskMessage, ttl time.Duration) error {
var op errors.Op = "rdb.EnqueueUnique"
encoded, err := base.EncodeMessage(msg)
if err != nil {
return errors.E(op, errors.Internal, "cannot encode task message: %v", err)
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
}
keys := []string{
msg.UniqueKey,
base.TaskKey(msg.Queue, msg.ID.String()),
base.TaskKey(msg.Queue, msg.ID),
base.PendingKey(msg.Queue),
}
argv := []interface{}{
msg.ID.String(),
msg.ID,
int(ttl.Seconds()),
encoded,
msg.Timeout,
msg.Deadline,
r.clock.Now().UnixNano(),
}
res, err := enqueueUniqueCmd.Run(r.client, keys, argv...).Result()
n, err := r.runScriptWithErrorCode(ctx, op, enqueueUniqueCmd, keys, argv...)
if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
return err
}
n, ok := res.(int64)
if !ok {
return errors.E(op, errors.Internal, fmt.Sprintf("unexpected return value from Lua script: %v", res))
if n == -1 {
return errors.E(op, errors.AlreadyExists, errors.ErrDuplicateTask)
}
if n == 0 {
return errors.E(op, errors.AlreadyExists, errors.ErrDuplicateTask)
return errors.E(op, errors.AlreadyExists, errors.ErrTaskIdConflict)
}
return nil
}
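EnqueueUnique now distinguishes two AlreadyExists cases: -1 (the uniqueness lock is held) maps to ErrDuplicateTask, and 0 (the same task ID was reused) maps to the new ErrTaskIdConflict. A sketch of how a caller of the public client API might tell them apart, using the exported sentinels (the task ID "import:42" is just an example value):

```go
package main

import (
	"errors"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// enqueueOnce is an illustrative wrapper around Client.Enqueue.
func enqueueOnce(client *asynq.Client, task *asynq.Task) {
	_, err := client.Enqueue(task,
		asynq.Unique(time.Hour),   // acquire a uniqueness lock for one hour
		asynq.TaskID("import:42"), // pin the task ID explicitly
	)
	switch {
	case errors.Is(err, asynq.ErrDuplicateTask):
		log.Print("skipped: an equivalent task is already queued")
	case errors.Is(err, asynq.ErrTaskIDConflict):
		log.Print("skipped: a task with this ID already exists")
	case err != nil:
		log.Printf("enqueue failed: %v", err)
	}
}
```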
@@ -186,6 +232,7 @@ if redis.call("EXISTS", KEYS[2]) == 0 then
if id then
local key = ARGV[2] .. id
redis.call("HSET", key, "state", "active")
redis.call("HDEL", key, "pending_since")
local data = redis.call("HMGET", key, "msg", "timeout", "deadline")
local msg = data[1]
local timeout = tonumber(data[2])
@@ -220,10 +267,10 @@ func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Ti
base.DeadlinesKey(qname),
}
argv := []interface{}{
time.Now().Unix(),
r.clock.Now().Unix(),
base.TaskKeyPrefix(qname),
}
res, err := dequeueCmd.Run(r.client, keys, argv...).Result()
res, err := dequeueCmd.Run(context.Background(), r.client, keys, argv...).Result()
if err == redis.Nil {
continue
} else if err != nil {
@@ -302,27 +349,120 @@ end
return redis.status_reply("OK")
`)

// Done removes the task from active queue to mark the task as done.
// Done removes the task from active queue and deletes the task.
// It removes a uniqueness lock acquired by the task, if any.
func (r *RDB) Done(msg *base.TaskMessage) error {
var op errors.Op = "rdb.Done"
now := time.Now()
ctx := context.Background()
now := r.clock.Now()
expireAt := now.Add(statsTTL)
keys := []string{
base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue),
base.TaskKey(msg.Queue, msg.ID.String()),
base.TaskKey(msg.Queue, msg.ID),
base.ProcessedKey(msg.Queue, now),
}
argv := []interface{}{
msg.ID.String(),
msg.ID,
expireAt.Unix(),
}
// Note: We cannot pass empty unique key when running this script in redis-cluster.
if len(msg.UniqueKey) > 0 {
keys = append(keys, msg.UniqueKey)
return r.runScript(op, doneUniqueCmd, keys, argv...)
return r.runScript(ctx, op, doneUniqueCmd, keys, argv...)
}
return r.runScript(op, doneCmd, keys, argv...)
return r.runScript(ctx, op, doneCmd, keys, argv...)
}

// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:completed
// KEYS[4] -> asynq:{<qname>}:t:<task_id>
// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// ARGV[1] -> task ID
// ARGV[2] -> stats expiration timestamp
// ARGV[3] -> task expiration time in unix time
// ARGV[4] -> task message data
var markAsCompleteCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
if redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1]) ~= 1 then
return redis.error_reply("INTERNAL")
end
redis.call("HSET", KEYS[4], "msg", ARGV[4], "state", "completed")
local n = redis.call("INCR", KEYS[5])
if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[5], ARGV[2])
end
return redis.status_reply("OK")
`)

// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:completed
// KEYS[4] -> asynq:{<qname>}:t:<task_id>
// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[6] -> asynq:{<qname>}:unique:{<checksum>}
// ARGV[1] -> task ID
// ARGV[2] -> stats expiration timestamp
// ARGV[3] -> task expiration time in unix time
// ARGV[4] -> task message data
var markAsCompleteUniqueCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
if redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1]) ~= 1 then
return redis.error_reply("INTERNAL")
end
redis.call("HSET", KEYS[4], "msg", ARGV[4], "state", "completed")
local n = redis.call("INCR", KEYS[5])
if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[5], ARGV[2])
end
if redis.call("GET", KEYS[6]) == ARGV[1] then
redis.call("DEL", KEYS[6])
end
return redis.status_reply("OK")
`)

// MarkAsComplete removes the task from active queue to mark the task as completed.
// It removes a uniqueness lock acquired by the task, if any.
func (r *RDB) MarkAsComplete(msg *base.TaskMessage) error {
var op errors.Op = "rdb.MarkAsComplete"
ctx := context.Background()
now := r.clock.Now()
statsExpireAt := now.Add(statsTTL)
msg.CompletedAt = now.Unix()
encoded, err := base.EncodeMessage(msg)
if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
}
keys := []string{
base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue),
base.CompletedKey(msg.Queue),
base.TaskKey(msg.Queue, msg.ID),
base.ProcessedKey(msg.Queue, now),
}
argv := []interface{}{
msg.ID,
statsExpireAt.Unix(),
now.Unix() + msg.Retention,
encoded,
}
// Note: We cannot pass empty unique key when running this script in redis-cluster.
if len(msg.UniqueKey) > 0 {
keys = append(keys, msg.UniqueKey)
return r.runScript(ctx, op, markAsCompleteUniqueCmd, keys, argv...)
}
return r.runScript(ctx, op, markAsCompleteCmd, keys, argv...)
}
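MarkAsComplete is what the retention feature from the changelog lands on: the completed-set score is the completion time plus msg.Retention, and the janitor deletes entries once that score is in the past. A sketch of the client-side option that feeds this field (task type and payload are example values):

```go
package main

import (
	"time"

	"github.com/hibiken/asynq"
)

// newEmailTask is illustrative: a task built this way is kept in the completed
// set for 24 hours after it finishes, then garbage-collected by the janitor.
func newEmailTask(payload []byte) *asynq.Task {
	return asynq.NewTask("email:welcome", payload, asynq.Retention(24*time.Hour))
}
```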
// KEYS[1] -> asynq:{<qname>}:active
@@ -345,13 +485,14 @@ return redis.status_reply("OK")`)
// Requeue moves the task from active queue to the specified queue.
func (r *RDB) Requeue(msg *base.TaskMessage) error {
var op errors.Op = "rdb.Requeue"
ctx := context.Background()
keys := []string{
base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue),
base.PendingKey(msg.Queue),
base.TaskKey(msg.Queue, msg.ID.String()),
base.TaskKey(msg.Queue, msg.ID),
}
return r.runScript(op, requeueCmd, keys, msg.ID.String())
return r.runScript(ctx, op, requeueCmd, keys, msg.ID)
}

// KEYS[1] -> asynq:{<qname>}:t:<task_id>
@@ -361,7 +502,14 @@ func (r *RDB) Requeue(msg *base.TaskMessage) error {
// ARGV[3] -> task ID
// ARGV[4] -> task timeout in seconds (0 if not timeout)
// ARGV[5] -> task deadline in unix time (0 if no deadline)
//
// Output:
// Returns 1 if successfully enqueued
// Returns 0 if task ID already exists
var scheduleCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[1]) == 1 then
return 0
end
redis.call("HSET", KEYS[1],
"msg", ARGV[1],
"state", "scheduled",
@@ -372,27 +520,34 @@ return 1
`)

// Schedule adds the task to the scheduled set to be processed in the future.
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
func (r *RDB) Schedule(ctx context.Context, msg *base.TaskMessage, processAt time.Time) error {
var op errors.Op = "rdb.Schedule"
encoded, err := base.EncodeMessage(msg)
if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
}
keys := []string{
base.TaskKey(msg.Queue, msg.ID.String()),
base.TaskKey(msg.Queue, msg.ID),
base.ScheduledKey(msg.Queue),
}
argv := []interface{}{
encoded,
processAt.Unix(),
msg.ID.String(),
msg.ID,
msg.Timeout,
msg.Deadline,
}
return r.runScript(op, scheduleCmd, keys, argv...)
n, err := r.runScriptWithErrorCode(ctx, op, scheduleCmd, keys, argv...)
if err != nil {
return err
}
if n == 0 {
return errors.E(op, errors.AlreadyExists, errors.ErrTaskIdConflict)
}
return nil
}

// KEYS[1] -> unique key
@@ -404,9 +559,17 @@ func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
// ARGV[4] -> task message
// ARGV[5] -> task timeout in seconds (0 if not timeout)
// ARGV[6] -> task deadline in unix time (0 if no deadline)
//
// Output:
// Returns 1 if successfully scheduled
// Returns 0 if task ID already exists
// Returns -1 if task unique key already exists
var scheduleUniqueCmd = redis.NewScript(`
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
if not ok then
return -1
end
if redis.call("EXISTS", KEYS[2]) == 1 then
return 0
end
redis.call("HSET", KEYS[2],
@@ -421,38 +584,37 @@ return 1

// ScheduleUnique adds the task to the backlog queue to be processed in the future if the uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired.
func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
func (r *RDB) ScheduleUnique(ctx context.Context, msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
var op errors.Op = "rdb.ScheduleUnique"
encoded, err := base.EncodeMessage(msg)
if err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode task message: %v", err))
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
}
keys := []string{
msg.UniqueKey,
base.TaskKey(msg.Queue, msg.ID.String()),
base.TaskKey(msg.Queue, msg.ID),
base.ScheduledKey(msg.Queue),
}
argv := []interface{}{
msg.ID.String(),
msg.ID,
int(ttl.Seconds()),
processAt.Unix(),
encoded,
msg.Timeout,
msg.Deadline,
}
res, err := scheduleUniqueCmd.Run(r.client, keys, argv...).Result()
n, err := r.runScriptWithErrorCode(ctx, op, scheduleUniqueCmd, keys, argv...)
if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
return err
}
n, ok := res.(int64)
if !ok {
return errors.E(op, errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
if n == -1 {
return errors.E(op, errors.AlreadyExists, errors.ErrDuplicateTask)
}
if n == 0 {
return errors.E(op, errors.AlreadyExists, errors.ErrDuplicateTask)
return errors.E(op, errors.AlreadyExists, errors.ErrTaskIdConflict)
}
return nil
}
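Schedule and ScheduleUnique are reached from the public client via the ProcessAt/ProcessIn options. A hedged usage sketch (queue and durations are example values):

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// scheduleReminder is illustrative: ProcessIn routes the task through the
// scheduled set above instead of the pending list, and Unique additionally
// takes it through ScheduleUnique.
func scheduleReminder(client *asynq.Client, task *asynq.Task) {
	info, err := client.Enqueue(task,
		asynq.ProcessIn(30*time.Minute), // run half an hour from now
		asynq.Unique(2*time.Hour),       // hold the uniqueness lock for 2h
	)
	if err != nil {
		log.Fatalf("could not schedule task: %v", err)
	}
	log.Printf("scheduled task id=%s queue=%s", info.ID, info.Queue)
}
```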
@@ -467,6 +629,7 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
// ARGV[2] -> updated base.TaskMessage value
// ARGV[3] -> retry_at UNIX timestamp
// ARGV[4] -> stats expiration timestamp
// ARGV[5] -> is_failure (bool)
var retryCmd = redis.NewScript(`
if redis.call("LREM", KEYS[2], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
@@ -476,23 +639,29 @@ if redis.call("ZREM", KEYS[3], ARGV[1]) == 0 then
end
redis.call("ZADD", KEYS[4], ARGV[3], ARGV[1])
redis.call("HSET", KEYS[1], "msg", ARGV[2], "state", "retry")
local n = redis.call("INCR", KEYS[5])
if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[5], ARGV[4])
end
local m = redis.call("INCR", KEYS[6])
if tonumber(m) == 1 then
redis.call("EXPIREAT", KEYS[6], ARGV[4])
if tonumber(ARGV[5]) == 1 then
local n = redis.call("INCR", KEYS[5])
if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[5], ARGV[4])
end
local m = redis.call("INCR", KEYS[6])
if tonumber(m) == 1 then
redis.call("EXPIREAT", KEYS[6], ARGV[4])
end
end
return redis.status_reply("OK")`)

// Retry moves the task from active to retry queue, incrementing retry count
// and assigning error message to the task message.
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
// Retry moves the task from active to retry queue.
// It also annotates the message with the given error message and
// if isFailure is true increments the retried counter.
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
var op errors.Op = "rdb.Retry"
now := time.Now()
ctx := context.Background()
now := r.clock.Now()
modified := *msg
modified.Retried++
if isFailure {
modified.Retried++
}
modified.ErrorMsg = errMsg
modified.LastFailedAt = now.Unix()
encoded, err := base.EncodeMessage(&modified)
@@ -501,7 +670,7 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) e
}
expireAt := now.Add(statsTTL)
keys := []string{
base.TaskKey(msg.Queue, msg.ID.String()),
base.TaskKey(msg.Queue, msg.ID),
base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue),
base.RetryKey(msg.Queue),
@@ -509,12 +678,13 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) e
base.FailedKey(msg.Queue, now),
}
argv := []interface{}{
msg.ID.String(),
msg.ID,
encoded,
processAt.Unix(),
expireAt.Unix(),
isFailure,
}
return r.runScript(op, retryCmd, keys, argv...)
return r.runScript(ctx, op, retryCmd, keys, argv...)
}

const (
@@ -559,7 +729,8 @@ return redis.status_reply("OK")`)
// It also trims the archive by timestamp and set size.
func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
var op errors.Op = "rdb.Archive"
now := time.Now()
ctx := context.Background()
now := r.clock.Now()
modified := *msg
modified.ErrorMsg = errMsg
modified.LastFailedAt = now.Unix()
@@ -570,7 +741,7 @@ func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
cutoff := now.AddDate(0, 0, -archivedExpirationInDays)
expireAt := now.Add(statsTTL)
keys := []string{
base.TaskKey(msg.Queue, msg.ID.String()),
base.TaskKey(msg.Queue, msg.ID),
base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue),
base.ArchivedKey(msg.Queue),
@@ -578,14 +749,14 @@ func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
base.FailedKey(msg.Queue, now),
}
argv := []interface{}{
msg.ID.String(),
msg.ID,
encoded,
now.Unix(),
cutoff.Unix(),
maxArchiveSize,
expireAt.Unix(),
}
return r.runScript(op, archiveCmd, keys, argv...)
return r.runScript(ctx, op, archiveCmd, keys, argv...)
}
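The processAt value passed to Retry comes from the server's retry-delay function. A hedged sketch of wiring a custom backoff through the public Config (the exponential formula here is illustrative and is not asynq's default):

```go
package main

import (
	"math"
	"time"

	"github.com/hibiken/asynq"
)

// backoff doubles the delay with each attempt: 2s, 4s, 8s, ...
func backoff(n int, err error, task *asynq.Task) time.Duration {
	return time.Duration(math.Pow(2, float64(n))) * time.Second
}

func newServer(redisAddr string) *asynq.Server {
	return asynq.NewServer(
		asynq.RedisClientOpt{Addr: redisAddr},
		asynq.Config{RetryDelayFunc: backoff},
	)
}
```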
// ForwardIfReady checks scheduled and retry sets of the given queues
@@ -602,23 +773,27 @@ func (r *RDB) ForwardIfReady(qnames ...string) error {

// KEYS[1] -> source queue (e.g. asynq:{<qname>:scheduled or asynq:{<qname>}:retry})
// KEYS[2] -> asynq:{<qname>}:pending
// ARGV[1] -> current unix time
// ARGV[1] -> current unix time in seconds
// ARGV[2] -> task key prefix
// ARGV[3] -> current unix time in nsec
// Note: Script moves tasks up to 100 at a time to keep the runtime of script short.
var forwardCmd = redis.NewScript(`
local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, 100)
for _, id in ipairs(ids) do
redis.call("LPUSH", KEYS[2], id)
redis.call("ZREM", KEYS[1], id)
redis.call("HSET", ARGV[2] .. id, "state", "pending")
redis.call("HSET", ARGV[2] .. id,
"state", "pending",
"pending_since", ARGV[3])
end
return table.getn(ids)`)

// forward moves tasks with a score less than the current unix time
// from the src zset to the dst list. It returns the number of tasks moved.
func (r *RDB) forward(src, dst, taskKeyPrefix string) (int, error) {
now := float64(time.Now().Unix())
res, err := forwardCmd.Run(r.client, []string{src, dst}, now, taskKeyPrefix).Result()
now := r.clock.Now()
res, err := forwardCmd.Run(context.Background(), r.client,
[]string{src, dst}, now.Unix(), taskKeyPrefix, now.UnixNano()).Result()
if err != nil {
return 0, errors.E(errors.Internal, fmt.Sprintf("redis eval error: %v", err))
}
@@ -647,6 +822,57 @@ func (r *RDB) forwardAll(qname string) (err error) {
return nil
}

// KEYS[1] -> asynq:{<qname>}:completed
// ARGV[1] -> current time in unix time
// ARGV[2] -> task key prefix
// ARGV[3] -> batch size (i.e. maximum number of tasks to delete)
//
// Returns the number of tasks deleted.
var deleteExpiredCompletedTasksCmd = redis.NewScript(`
local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, tonumber(ARGV[3]))
for _, id in ipairs(ids) do
redis.call("DEL", ARGV[2] .. id)
redis.call("ZREM", KEYS[1], id)
end
return table.getn(ids)`)

// DeleteExpiredCompletedTasks checks for any expired tasks in the given queue's completed set,
// and deletes all expired tasks.
func (r *RDB) DeleteExpiredCompletedTasks(qname string) error {
// Note: Do this operation in fixed batches to prevent a long-running script.
const batchSize = 100
for {
n, err := r.deleteExpiredCompletedTasks(qname, batchSize)
if err != nil {
return err
}
if n == 0 {
return nil
}
}
}

// deleteExpiredCompletedTasks runs the lua script to delete expired completed tasks with the specified
// batch size. It reports the number of tasks deleted.
func (r *RDB) deleteExpiredCompletedTasks(qname string, batchSize int) (int64, error) {
var op errors.Op = "rdb.DeleteExpiredCompletedTasks"
keys := []string{base.CompletedKey(qname)}
argv := []interface{}{
r.clock.Now().Unix(),
base.TaskKeyPrefix(qname),
batchSize,
}
res, err := deleteExpiredCompletedTasksCmd.Run(context.Background(), r.client, keys, argv...).Result()
if err != nil {
return 0, errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err))
}
n, ok := res.(int64)
if !ok {
return 0, errors.E(op, errors.Internal, fmt.Sprintf("unexpected return value from Lua script: %v", res))
}
return n, nil
}
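The loop in DeleteExpiredCompletedTasks is a general pattern: repeat a bounded unit of work until it reports nothing left, so no single Redis script invocation runs long enough to block other callers. A stand-alone sketch of the same idea with a hypothetical deleteBatch callback:

```go
package main

import "fmt"

// drain repeatedly invokes a bounded deletion step until it returns 0.
// deleteBatch is assumed to delete at most `max` items and report the count.
func drain(deleteBatch func(max int) (int, error)) error {
	const batchSize = 100
	for {
		n, err := deleteBatch(batchSize)
		if err != nil {
			return err
		}
		if n == 0 {
			return nil // nothing left to delete
		}
		fmt.Printf("deleted %d expired tasks\n", n)
	}
}
```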
// KEYS[1] -> asynq:{<qname>}:deadlines
// ARGV[1] -> deadline in unix time
// ARGV[2] -> task key prefix
@@ -665,7 +891,7 @@ func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*bas
var op errors.Op = "rdb.ListDeadlineExceeded"
var msgs []*base.TaskMessage
for _, qname := range qnames {
res, err := listDeadlineExceededCmd.Run(r.client,
res, err := listDeadlineExceededCmd.Run(context.Background(), r.client,
[]string{base.DeadlinesKey(qname)},
deadline.Unix(), base.TaskKeyPrefix(qname)).Result()
if err != nil {
@@ -705,11 +931,12 @@ return redis.status_reply("OK")`)
// WriteServerState writes server state data to redis with expiration set to the value ttl.
func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
var op errors.Op = "rdb.WriteServerState"
ctx := context.Background()
bytes, err := base.EncodeServerInfo(info)
if err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode server info: %v", err))
}
exp := time.Now().Add(ttl).UTC()
exp := r.clock.Now().Add(ttl).UTC()
args := []interface{}{ttl.Seconds(), bytes} // args to the lua script
for _, w := range workers {
bytes, err := base.EncodeWorkerInfo(w)
@@ -720,13 +947,13 @@ func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo
}
skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
if err := r.client.ZAdd(base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
if err := r.client.ZAdd(ctx, base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err})
}
if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
if err := r.client.ZAdd(ctx, base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err})
}
return r.runScript(op, writeServerStateCmd, []string{skey, wkey}, args...)
return r.runScript(ctx, op, writeServerStateCmd, []string{skey, wkey}, args...)
}

// KEYS[1] -> asynq:servers:{<host:pid:sid>}
@@ -739,15 +966,16 @@ return redis.status_reply("OK")`)
// ClearServerState deletes server state data from redis.
func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
var op errors.Op = "rdb.ClearServerState"
ctx := context.Background()
skey := base.ServerInfoKey(host, pid, serverID)
wkey := base.WorkersKey(host, pid, serverID)
if err := r.client.ZRem(base.AllServers, skey).Err(); err != nil {
if err := r.client.ZRem(ctx, base.AllServers, skey).Err(); err != nil {
return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err})
}
if err := r.client.ZRem(base.AllWorkers, wkey).Err(); err != nil {
if err := r.client.ZRem(ctx, base.AllWorkers, wkey).Err(); err != nil {
return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err})
}
return r.runScript(op, clearServerStateCmd, []string{skey, wkey})
return r.runScript(ctx, op, clearServerStateCmd, []string{skey, wkey})
}

// KEYS[1] -> asynq:schedulers:{<schedulerID>}
@@ -764,6 +992,7 @@ return redis.status_reply("OK")`)
// WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl.
func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
var op errors.Op = "rdb.WriteSchedulerEntries"
ctx := context.Background()
args := []interface{}{ttl.Seconds()}
for _, e := range entries {
bytes, err := base.EncodeSchedulerEntry(e)
@@ -772,23 +1001,24 @@ func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.Schedule
}
args = append(args, bytes)
}
exp := time.Now().Add(ttl).UTC()
exp := r.clock.Now().Add(ttl).UTC()
key := base.SchedulerEntriesKey(schedulerID)
err := r.client.ZAdd(base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
err := r.client.ZAdd(ctx, base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
if err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err})
}
return r.runScript(op, writeSchedulerEntriesCmd, []string{key}, args...)
return r.runScript(ctx, op, writeSchedulerEntriesCmd, []string{key}, args...)
}

// ClearSchedulerEntries deletes scheduler entries data from redis.
func (r *RDB) ClearSchedulerEntries(schedulerID string) error {
var op errors.Op = "rdb.ClearSchedulerEntries"
ctx := context.Background()
key := base.SchedulerEntriesKey(schedulerID)
if err := r.client.ZRem(base.AllSchedulers, key).Err(); err != nil {
if err := r.client.ZRem(ctx, base.AllSchedulers, key).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zrem", Err: err})
}
if err := r.client.Del(key).Err(); err != nil {
if err := r.client.Del(ctx, key).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err})
}
return nil
@@ -797,8 +1027,9 @@ func (r *RDB) ClearSchedulerEntries(schedulerID string) error {
// CancelationPubSub returns a pubsub for cancelation messages.
func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
var op errors.Op = "rdb.CancelationPubSub"
pubsub := r.client.Subscribe(base.CancelChannel)
_, err := pubsub.Receive()
ctx := context.Background()
pubsub := r.client.Subscribe(ctx, base.CancelChannel)
_, err := pubsub.Receive(ctx)
if err != nil {
return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub receive error: %v", err))
}
@@ -809,7 +1040,8 @@ func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
// The message is the ID for the task to be canceled.
func (r *RDB) PublishCancelation(id string) error {
var op errors.Op = "rdb.PublishCancelation"
if err := r.client.Publish(base.CancelChannel, id).Err(); err != nil {
ctx := context.Background()
if err := r.client.Publish(ctx, base.CancelChannel, id).Err(); err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub publish error: %v", err))
}
return nil
@@ -830,6 +1062,7 @@ const maxEvents = 1000
// RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
var op errors.Op = "rdb.RecordSchedulerEnqueueEvent"
ctx := context.Background()
data, err := base.EncodeSchedulerEnqueueEvent(event)
if err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode scheduler enqueue event: %v", err))
@@ -842,15 +1075,27 @@ func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerE
data,
maxEvents,
}
return r.runScript(op, recordSchedulerEnqueueEventCmd, keys, argv...)
return r.runScript(ctx, op, recordSchedulerEnqueueEventCmd, keys, argv...)
}

// ClearSchedulerHistory deletes the enqueue event history for the given scheduler entry.
func (r *RDB) ClearSchedulerHistory(entryID string) error {
var op errors.Op = "rdb.ClearSchedulerHistory"
ctx := context.Background()
key := base.SchedulerHistoryKey(entryID)
if err := r.client.Del(key).Err(); err != nil {
if err := r.client.Del(ctx, key).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err})
}
return nil
}

// WriteResult writes the given result data for the specified task.
func (r *RDB) WriteResult(qname, taskID string, data []byte) (int, error) {
var op errors.Op = "rdb.WriteResult"
ctx := context.Background()
taskKey := base.TaskKey(qname, taskID)
if err := r.client.HSet(ctx, taskKey, "result", data).Err(); err != nil {
return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "hset", Err: err})
}
return len(data), nil
}
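WriteResult is the storage half of the new ResultWriter feature from the changelog: handlers can persist a small result blob into the task's "result" hash field while the completed task is retained. A hedged handler sketch (the payload shape is made up for illustration):

```go
package main

import (
	"context"
	"encoding/json"

	"github.com/hibiken/asynq"
)

// handleReport is an illustrative handler: it stores a small JSON result via
// the task's ResultWriter, which is backed by RDB.WriteResult above.
func handleReport(ctx context.Context, task *asynq.Task) error {
	res, err := json.Marshal(map[string]int{"rows": 42})
	if err != nil {
		return err
	}
	_, err = task.ResultWriter().Write(res) // ends up in the task's "result" field
	return err
}
```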
(File diff suppressed because it is too large)
@@ -6,11 +6,12 @@
package testbroker

import (
"context"
"errors"
"sync"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
)

@@ -45,22 +46,22 @@ func (tb *TestBroker) Wakeup() {
tb.sleeping = false
}

func (tb *TestBroker) Enqueue(msg *base.TaskMessage) error {
func (tb *TestBroker) Enqueue(ctx context.Context, msg *base.TaskMessage) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.Enqueue(msg)
return tb.real.Enqueue(ctx, msg)
}

func (tb *TestBroker) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
func (tb *TestBroker) EnqueueUnique(ctx context.Context, msg *base.TaskMessage, ttl time.Duration) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.EnqueueUnique(msg, ttl)
return tb.real.EnqueueUnique(ctx, msg, ttl)
}

func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, error) {
@@ -81,6 +82,15 @@ func (tb *TestBroker) Done(msg *base.TaskMessage) error {
return tb.real.Done(msg)
}

func (tb *TestBroker) MarkAsComplete(msg *base.TaskMessage) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.MarkAsComplete(msg)
}

func (tb *TestBroker) Requeue(msg *base.TaskMessage) error {
tb.mu.Lock()
defer tb.mu.Unlock()
@@ -90,31 +100,31 @@ func (tb *TestBroker) Requeue(msg *base.TaskMessage) error {
return tb.real.Requeue(msg)
}

func (tb *TestBroker) Schedule(msg *base.TaskMessage, processAt time.Time) error {
func (tb *TestBroker) Schedule(ctx context.Context, msg *base.TaskMessage, processAt time.Time) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.Schedule(msg, processAt)
return tb.real.Schedule(ctx, msg, processAt)
}

func (tb *TestBroker) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
func (tb *TestBroker) ScheduleUnique(ctx context.Context, msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.ScheduleUnique(msg, processAt, ttl)
return tb.real.ScheduleUnique(ctx, msg, processAt, ttl)
}

func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.Retry(msg, processAt, errMsg)
return tb.real.Retry(msg, processAt, errMsg, isFailure)
}

func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error {
@@ -135,6 +145,15 @@ func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
return tb.real.ForwardIfReady(qnames...)
}

func (tb *TestBroker) DeleteExpiredCompletedTasks(qname string) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.DeleteExpiredCompletedTasks(qname)
}

func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
tb.mu.Lock()
defer tb.mu.Unlock()
@@ -180,6 +199,15 @@ func (tb *TestBroker) PublishCancelation(id string) error {
return tb.real.PublishCancelation(id)
}

func (tb *TestBroker) WriteResult(qname, id string, data []byte) (int, error) {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return 0, errRedisDown
}
return tb.real.WriteResult(qname, id, data)
}

func (tb *TestBroker) Ping() error {
tb.mu.Lock()
defer tb.mu.Unlock()
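TestBroker wraps a real broker and can simulate Redis going down, which is how error-handling paths are exercised in tests. A hedged sketch of its intended use (only compilable from within the asynq module, since these packages are internal; the helper name is made up):

```go
package main

import (
	"context"
	"testing"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
	"github.com/hibiken/asynq/internal/testbroker"
)

// testEnqueueWhileDown illustrates the Sleep/Wakeup toggling shown above:
// while sleeping, every broker method returns errRedisDown.
func testEnqueueWhileDown(t *testing.T, r *rdb.RDB, msg *base.TaskMessage) {
	tb := testbroker.NewTestBroker(r)
	tb.Sleep() // simulate Redis being unreachable
	if err := tb.Enqueue(context.Background(), msg); err == nil {
		t.Fatal("want error while broker is down, got nil")
	}
	tb.Wakeup()
	if err := tb.Enqueue(context.Background(), msg); err != nil {
		t.Fatalf("Enqueue failed after wakeup: %v", err)
	}
}
```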
38
internal/timeutil/timeutil.go
Normal file
@@ -0,0 +1,38 @@
// Package timeutil exports functions and types related to time and date.
package timeutil

import "time"

// A Clock is an object that can tell you the current time.
//
// This interface allows decoupling code that uses time from the code that creates
// a point in time. You can use this to your advantage by injecting Clocks into interfaces
// rather than having implementations call time.Now() directly.
//
// Use RealClock() in production.
// Use SimulatedClock() in test.
type Clock interface {
Now() time.Time
}

func NewRealClock() Clock { return &realTimeClock{} }

type realTimeClock struct{}

func (_ *realTimeClock) Now() time.Time { return time.Now() }

// A SimulatedClock is a concrete Clock implementation that doesn't "tick" on its own.
// Time is advanced by explicit call to the AdvanceTime() or SetTime() functions.
type SimulatedClock struct {
t time.Time
}

func NewSimulatedClock(t time.Time) *SimulatedClock {
return &SimulatedClock{t}
}

func (c *SimulatedClock) Now() time.Time { return c.t }

func (c *SimulatedClock) SetTime(t time.Time) { c.t = t }

func (c *SimulatedClock) AdvanceTime(d time.Duration) { c.t = c.t.Add(d) }
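The Clock abstraction above is what RDB.SetClock plugs into. A sketch of freezing "now" in a test so values derived from the clock (pending_since, completed-set scores) become deterministic (the test helper name and date are made up; only usable inside the module, as the packages are internal):

```go
package main

import (
	"testing"
	"time"

	"github.com/hibiken/asynq/internal/rdb"
	"github.com/hibiken/asynq/internal/timeutil"
)

func testWithFrozenClock(t *testing.T, r *rdb.RDB) {
	now := time.Date(2021, time.November, 6, 12, 0, 0, 0, time.UTC)
	clock := timeutil.NewSimulatedClock(now)
	r.SetClock(clock) // every r.clock.Now() call now returns the frozen time

	// ... enqueue tasks and assert on timestamps derived from `now` ...

	clock.AdvanceTime(30 * time.Minute) // move time forward without sleeping
}
```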
81
janitor.go
Normal file
@@ -0,0 +1,81 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
"sync"
"time"

"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)

// A janitor is responsible for deleting expired completed tasks from the specified
// queues. It periodically checks for any expired tasks in the completed set, and
// deletes them.
type janitor struct {
logger *log.Logger
broker base.Broker

// channel to communicate back to the long running "janitor" goroutine.
done chan struct{}

// list of queue names to check.
queues []string

// average interval between checks.
avgInterval time.Duration
}

type janitorParams struct {
logger *log.Logger
broker base.Broker
queues []string
interval time.Duration
}

func newJanitor(params janitorParams) *janitor {
return &janitor{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
queues: params.queues,
avgInterval: params.interval,
}
}

func (j *janitor) shutdown() {
j.logger.Debug("Janitor shutting down...")
// Signal the janitor goroutine to stop.
j.done <- struct{}{}
}

// start starts the "janitor" goroutine.
func (j *janitor) start(wg *sync.WaitGroup) {
wg.Add(1)
timer := time.NewTimer(j.avgInterval) // randomize this interval with margin of 1s
go func() {
defer wg.Done()
for {
select {
case <-j.done:
j.logger.Debug("Janitor done")
return
case <-timer.C:
j.exec()
timer.Reset(j.avgInterval)
}
}
}()
}

func (j *janitor) exec() {
for _, qname := range j.queues {
if err := j.broker.DeleteExpiredCompletedTasks(qname); err != nil {
j.logger.Errorf("Could not delete expired completed tasks from queue %q: %v",
qname, err)
}
}
}
89
janitor_test.go
Normal file
@@ -0,0 +1,89 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
"sync"
"testing"
"time"

"github.com/google/go-cmp/cmp"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
)

func newCompletedTask(qname, tasktype string, payload []byte, completedAt time.Time) *base.TaskMessage {
msg := h.NewTaskMessageWithQueue(tasktype, payload, qname)
msg.CompletedAt = completedAt.Unix()
return msg
}

func TestJanitor(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
const interval = 1 * time.Second
janitor := newJanitor(janitorParams{
logger: testLogger,
broker: rdbClient,
queues: []string{"default", "custom"},
interval: interval,
})

now := time.Now()
hourAgo := now.Add(-1 * time.Hour)
minuteAgo := now.Add(-1 * time.Minute)
halfHourAgo := now.Add(-30 * time.Minute)
halfHourFromNow := now.Add(30 * time.Minute)
fiveMinFromNow := now.Add(5 * time.Minute)
msg1 := newCompletedTask("default", "task1", nil, hourAgo)
msg2 := newCompletedTask("default", "task2", nil, minuteAgo)
msg3 := newCompletedTask("custom", "task3", nil, hourAgo)
msg4 := newCompletedTask("custom", "task4", nil, minuteAgo)

tests := []struct {
completed map[string][]base.Z // initial completed sets
wantCompleted map[string][]base.Z // expected completed sets after janitor runs
}{
{
completed: map[string][]base.Z{
"default": {
{Message: msg1, Score: halfHourAgo.Unix()},
{Message: msg2, Score: fiveMinFromNow.Unix()},
},
"custom": {
{Message: msg3, Score: halfHourFromNow.Unix()},
{Message: msg4, Score: minuteAgo.Unix()},
},
},
wantCompleted: map[string][]base.Z{
"default": {
{Message: msg2, Score: fiveMinFromNow.Unix()},
},
"custom": {
{Message: msg3, Score: halfHourFromNow.Unix()},
},
},
},
}

for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)

var wg sync.WaitGroup
janitor.start(&wg)
time.Sleep(2 * interval) // make sure to let janitor run at least one time
janitor.shutdown()

for qname, want := range tc.wantCompleted {
got := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, got, h.SortZSetEntryOpt); diff != "" {
t.Errorf("diff found in %q after running janitor: (-want, +got)\n%s", base.CompletedKey(qname), diff)
}
}
}
}
77
processor.go
@@ -16,6 +16,7 @@ import (
    "time"

    "github.com/hibiken/asynq/internal/base"
    asynqcontext "github.com/hibiken/asynq/internal/context"
    "github.com/hibiken/asynq/internal/errors"
    "github.com/hibiken/asynq/internal/log"
    "golang.org/x/time/rate"
@@ -33,6 +34,7 @@ type processor struct {
    orderedQueues []string

    retryDelayFunc RetryDelayFunc
    isFailureFunc  func(error) bool

    errHandler ErrorHandler

@@ -70,6 +72,7 @@ type processorParams struct {
    logger          *log.Logger
    broker          base.Broker
    retryDelayFunc  RetryDelayFunc
    isFailureFunc   func(error) bool
    syncCh          chan<- *syncRequest
    cancelations    *base.Cancelations
    concurrency     int
@@ -94,6 +97,7 @@ func newProcessor(params processorParams) *processor {
    queueConfig:    queues,
    orderedQueues:  orderedQueues,
    retryDelayFunc: params.retryDelayFunc,
    isFailureFunc:  params.isFailureFunc,
    syncRequestCh:  params.syncCh,
    cancelations:   params.cancelations,
    errLogLimiter:  rate.NewLimiter(rate.Every(3*time.Second), 1),
@@ -186,25 +190,35 @@ func (p *processor) exec() {
        <-p.sema // release token
    }()

    ctx, cancel := createContext(msg, deadline)
    p.cancelations.Add(msg.ID.String(), cancel)
    ctx, cancel := asynqcontext.New(msg, deadline)
    p.cancelations.Add(msg.ID, cancel)
    defer func() {
        cancel()
        p.cancelations.Delete(msg.ID.String())
        p.cancelations.Delete(msg.ID)
    }()

    // check context before starting a worker goroutine.
    select {
    case <-ctx.Done():
        // already canceled (e.g. deadline exceeded).
        p.retryOrKill(ctx, msg, ctx.Err())
        p.handleFailedMessage(ctx, msg, ctx.Err())
        return
    default:
    }

    resCh := make(chan error, 1)
    go func() {
        resCh <- p.perform(ctx, NewTask(msg.Type, msg.Payload))
        task := newTask(
            msg.Type,
            msg.Payload,
            &ResultWriter{
                id:     msg.ID,
                qname:  msg.Queue,
                broker: p.broker,
                ctx:    ctx,
            },
        )
        resCh <- p.perform(ctx, task)
    }()

    select {
@@ -214,18 +228,14 @@ func (p *processor) exec() {
        p.requeue(msg)
        return
    case <-ctx.Done():
        p.retryOrKill(ctx, msg, ctx.Err())
        p.handleFailedMessage(ctx, msg, ctx.Err())
        return
    case resErr := <-resCh:
        // Note: One of three things should happen.
        // 1) Done    -> Removes the message from Active
        // 2) Retry   -> Removes the message from Active & Adds the message to Retry
        // 3) Archive -> Removes the message from Active & Adds the message to archive
        if resErr != nil {
            p.retryOrKill(ctx, msg, resErr)
            p.handleFailedMessage(ctx, msg, resErr)
            return
        }
        p.markAsDone(ctx, msg)
        p.handleSucceededMessage(ctx, msg)
    }
    }()
}
@@ -240,6 +250,34 @@ func (p *processor) requeue(msg *base.TaskMessage) {
    }
}

func (p *processor) handleSucceededMessage(ctx context.Context, msg *base.TaskMessage) {
    if msg.Retention > 0 {
        p.markAsComplete(ctx, msg)
    } else {
        p.markAsDone(ctx, msg)
    }
}

func (p *processor) markAsComplete(ctx context.Context, msg *base.TaskMessage) {
    err := p.broker.MarkAsComplete(msg)
    if err != nil {
        errMsg := fmt.Sprintf("Could not move task id=%s type=%q from %q to %q: %+v",
            msg.ID, msg.Type, base.ActiveKey(msg.Queue), base.CompletedKey(msg.Queue), err)
        deadline, ok := ctx.Deadline()
        if !ok {
            panic("asynq: internal error: missing deadline in context")
        }
        p.logger.Warnf("%s; Will retry syncing", errMsg)
        p.syncRequestCh <- &syncRequest{
            fn: func() error {
                return p.broker.MarkAsComplete(msg)
            },
            errMsg:   errMsg,
            deadline: deadline,
        }
    }
}

func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
    err := p.broker.Done(msg)
    if err != nil {
@@ -263,22 +301,27 @@ func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
// the task should not be retried and should be archived instead.
var SkipRetry = errors.New("skip retry for the task")

func (p *processor) retryOrKill(ctx context.Context, msg *base.TaskMessage, err error) {
func (p *processor) handleFailedMessage(ctx context.Context, msg *base.TaskMessage, err error) {
    if p.errHandler != nil {
        p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
    }
    if !p.isFailureFunc(err) {
        // retry the task without marking it as failed
        p.retry(ctx, msg, err, false /*isFailure*/)
        return
    }
    if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) {
        p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
        p.archive(ctx, msg, err)
    } else {
        p.retry(ctx, msg, err)
        p.retry(ctx, msg, err, true /*isFailure*/)
    }
}

func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error, isFailure bool) {
    d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
    retryAt := time.Now().Add(d)
    err := p.broker.Retry(msg, retryAt, e.Error())
    err := p.broker.Retry(msg, retryAt, e.Error(), isFailure)
    if err != nil {
        errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
        deadline, ok := ctx.Deadline()
@@ -288,7 +331,7 @@ func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
    p.logger.Warnf("%s; Will retry syncing", errMsg)
    p.syncRequestCh <- &syncRequest{
        fn: func() error {
            return p.broker.Retry(msg, retryAt, e.Error())
            return p.broker.Retry(msg, retryAt, e.Error(), isFailure)
        },
        errMsg:   errMsg,
        deadline: deadline,
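In handleFailedMessage above, an error that wraps SkipRetry bypasses the remaining retry budget and sends the task straight to the archive. A minimal handler sketch (the payload check is illustrative; assumes imports of "context", "fmt", and "github.com/hibiken/asynq"):

    func handleImageResize(ctx context.Context, t *asynq.Task) error {
        if len(t.Payload()) == 0 {
            // A task that can never succeed should not be retried.
            return fmt.Errorf("empty payload: %w", asynq.SkipRetry)
        }
        // ... normal processing ...
        return nil
    }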
@@ -14,11 +14,18 @@ import (
    "time"

    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    h "github.com/hibiken/asynq/internal/asynqtest"
    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/rdb"
)

var taskCmpOpts = []cmp.Option{
    sortTaskOpt,                               // sort the tasks
    cmp.AllowUnexported(Task{}),               // allow typename, payload fields to be compared
    cmpopts.IgnoreFields(Task{}, "opts", "w"), // ignore opts, w fields
}

// fakeHeartbeater receives from starting and finished channels and do nothing.
func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) {
    for {
@@ -42,6 +49,34 @@ func fakeSyncer(syncCh <-chan *syncRequest, done <-chan struct{}) {
    }
}

// Returns a processor instance configured for testing purpose.
func newProcessorForTest(t *testing.T, r *rdb.RDB, h Handler) *processor {
    starting := make(chan *workerInfo)
    finished := make(chan *base.TaskMessage)
    syncCh := make(chan *syncRequest)
    done := make(chan struct{})
    t.Cleanup(func() { close(done) })
    go fakeHeartbeater(starting, finished, done)
    go fakeSyncer(syncCh, done)
    p := newProcessor(processorParams{
        logger:          testLogger,
        broker:          r,
        retryDelayFunc:  DefaultRetryDelayFunc,
        isFailureFunc:   defaultIsFailureFunc,
        syncCh:          syncCh,
        cancelations:    base.NewCancelations(),
        concurrency:     10,
        queues:          defaultQueueConfig,
        strictPriority:  false,
        errHandler:      nil,
        shutdownTimeout: defaultShutdownTimeout,
        starting:        starting,
        finished:        finished,
    })
    p.handler = h
    return p
}

func TestProcessorSuccessWithSingleQueue(t *testing.T) {
    r := setup(t)
    defer r.Close()
@@ -87,45 +122,24 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
        processed = append(processed, task)
        return nil
    }
    starting := make(chan *workerInfo)
    finished := make(chan *base.TaskMessage)
    syncCh := make(chan *syncRequest)
    done := make(chan struct{})
    defer func() { close(done) }()
    go fakeHeartbeater(starting, finished, done)
    go fakeSyncer(syncCh, done)
    p := newProcessor(processorParams{
        logger:          testLogger,
        broker:          rdbClient,
        retryDelayFunc:  DefaultRetryDelayFunc,
        syncCh:          syncCh,
        cancelations:    base.NewCancelations(),
        concurrency:     10,
        queues:          defaultQueueConfig,
        strictPriority:  false,
        errHandler:      nil,
        shutdownTimeout: defaultShutdownTimeout,
        starting:        starting,
        finished:        finished,
    })
    p.handler = HandlerFunc(handler)
    p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))

    p.start(&sync.WaitGroup{})
    for _, msg := range tc.incoming {
        err := rdbClient.Enqueue(msg)
        err := rdbClient.Enqueue(context.Background(), msg)
        if err != nil {
            p.shutdown()
            t.Fatal(err)
        }
    }
    time.Sleep(2 * time.Second) // wait for two second to allow all pending tasks to be processed.
    if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
    if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
        t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
    }
    p.shutdown()

    mu.Lock()
    if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
    if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
        t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
    }
    mu.Unlock()
@@ -179,46 +193,26 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
        processed = append(processed, task)
        return nil
    }
    starting := make(chan *workerInfo)
    finished := make(chan *base.TaskMessage)
    syncCh := make(chan *syncRequest)
    done := make(chan struct{})
    defer func() { close(done) }()
    go fakeHeartbeater(starting, finished, done)
    go fakeSyncer(syncCh, done)
    p := newProcessor(processorParams{
        logger:         testLogger,
        broker:         rdbClient,
        retryDelayFunc: DefaultRetryDelayFunc,
        syncCh:         syncCh,
        cancelations:   base.NewCancelations(),
        concurrency:    10,
        queues: map[string]int{
            "default": 2,
            "high":    3,
            "low":     1,
        },
        strictPriority:  false,
        errHandler:      nil,
        shutdownTimeout: defaultShutdownTimeout,
        starting:        starting,
        finished:        finished,
    })
    p.handler = HandlerFunc(handler)
    p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
    p.queueConfig = map[string]int{
        "default": 2,
        "high":    3,
        "low":     1,
    }

    p.start(&sync.WaitGroup{})
    // Wait for two second to allow all pending tasks to be processed.
    time.Sleep(2 * time.Second)
    // Make sure no messages are stuck in active list.
    for _, qname := range tc.queues {
        if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
        if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
            t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
        }
    }
    p.shutdown()

    mu.Lock()
    if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
    if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
        t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
    }
    mu.Unlock()
@@ -265,38 +259,17 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
        processed = append(processed, task)
        return nil
    }
    starting := make(chan *workerInfo)
    finished := make(chan *base.TaskMessage)
    syncCh := make(chan *syncRequest)
    done := make(chan struct{})
    defer func() { close(done) }()
    go fakeHeartbeater(starting, finished, done)
    go fakeSyncer(syncCh, done)
    p := newProcessor(processorParams{
        logger:          testLogger,
        broker:          rdbClient,
        retryDelayFunc:  DefaultRetryDelayFunc,
        syncCh:          syncCh,
        cancelations:    base.NewCancelations(),
        concurrency:     10,
        queues:          defaultQueueConfig,
        strictPriority:  false,
        errHandler:      nil,
        shutdownTimeout: defaultShutdownTimeout,
        starting:        starting,
        finished:        finished,
    })
    p.handler = HandlerFunc(handler)
    p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))

    p.start(&sync.WaitGroup{})
    time.Sleep(2 * time.Second) // wait for two second to allow all pending tasks to be processed.
    if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
    if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
        t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
    }
    p.shutdown()

    mu.Lock()
    if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
    if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
        t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
    }
    mu.Unlock()
@@ -386,26 +359,9 @@ func TestProcessorRetry(t *testing.T) {
        defer mu.Unlock()
        n++
    }
    starting := make(chan *workerInfo)
    finished := make(chan *base.TaskMessage)
    done := make(chan struct{})
    defer func() { close(done) }()
    go fakeHeartbeater(starting, finished, done)
    p := newProcessor(processorParams{
        logger:          testLogger,
        broker:          rdbClient,
        retryDelayFunc:  delayFunc,
        syncCh:          nil,
        cancelations:    base.NewCancelations(),
        concurrency:     10,
        queues:          defaultQueueConfig,
        strictPriority:  false,
        errHandler:      ErrorHandlerFunc(errHandler),
        shutdownTimeout: defaultShutdownTimeout,
        starting:        starting,
        finished:        finished,
    })
    p.handler = tc.handler
    p := newProcessorForTest(t, rdbClient, tc.handler)
    p.errHandler = ErrorHandlerFunc(errHandler)
    p.retryDelayFunc = delayFunc

    p.start(&sync.WaitGroup{})
    runTime := time.Now() // time when processor is running
@@ -439,7 +395,7 @@ func TestProcessorRetry(t *testing.T) {
        t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff)
    }

    if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
    if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
        t.Errorf("%s: %q has %d tasks, want 0", tc.desc, base.ActiveKey(base.DefaultQueueName), l)
    }

@@ -449,6 +405,81 @@ func TestProcessorRetry(t *testing.T) {
    }
}

func TestProcessorMarkAsComplete(t *testing.T) {
    r := setup(t)
    defer r.Close()
    rdbClient := rdb.NewRDB(r)

    msg1 := h.NewTaskMessage("one", nil)
    msg2 := h.NewTaskMessage("two", nil)
    msg3 := h.NewTaskMessageWithQueue("three", nil, "custom")
    msg1.Retention = 3600
    msg3.Retention = 7200

    handler := func(ctx context.Context, task *Task) error { return nil }

    tests := []struct {
        pending       map[string][]*base.TaskMessage
        completed     map[string][]base.Z
        queueCfg      map[string]int
        wantPending   map[string][]*base.TaskMessage
        wantCompleted func(completedAt time.Time) map[string][]base.Z
    }{
        {
            pending: map[string][]*base.TaskMessage{
                "default": {msg1, msg2},
                "custom":  {msg3},
            },
            completed: map[string][]base.Z{
                "default": {},
                "custom":  {},
            },
            queueCfg: map[string]int{
                "default": 1,
                "custom":  1,
            },
            wantPending: map[string][]*base.TaskMessage{
                "default": {},
                "custom":  {},
            },
            wantCompleted: func(completedAt time.Time) map[string][]base.Z {
                return map[string][]base.Z{
                    "default": {{Message: h.TaskMessageWithCompletedAt(*msg1, completedAt), Score: completedAt.Unix() + msg1.Retention}},
                    "custom":  {{Message: h.TaskMessageWithCompletedAt(*msg3, completedAt), Score: completedAt.Unix() + msg3.Retention}},
                }
            },
        },
    }

    for _, tc := range tests {
        h.FlushDB(t, r)
        h.SeedAllPendingQueues(t, r, tc.pending)
        h.SeedAllCompletedQueues(t, r, tc.completed)

        p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
        p.queueConfig = tc.queueCfg

        p.start(&sync.WaitGroup{})
        runTime := time.Now() // time when processor is running
        time.Sleep(2 * time.Second)
        p.shutdown()

        for qname, want := range tc.wantPending {
            gotPending := h.GetPendingMessages(t, r, qname)
            if diff := cmp.Diff(want, gotPending, cmpopts.EquateEmpty()); diff != "" {
                t.Errorf("diff found in %q pending set; want=%v, got=%v\n%s", qname, want, gotPending, diff)
            }
        }

        for qname, want := range tc.wantCompleted(runTime) {
            gotCompleted := h.GetCompletedEntries(t, r, qname)
            if diff := cmp.Diff(want, gotCompleted, cmpopts.EquateEmpty()); diff != "" {
                t.Errorf("diff found in %q completed set; want=%v, got=%v\n%s", qname, want, gotCompleted, diff)
            }
        }
    }
}

func TestProcessorQueues(t *testing.T) {
    sortOpt := cmp.Transformer("SortStrings", func(in []string) []string {
        out := append([]string(nil), in...) // Copy input to avoid mutating it
@@ -477,25 +508,10 @@ func TestProcessorQueues(t *testing.T) {
    }

    for _, tc := range tests {
        starting := make(chan *workerInfo)
        finished := make(chan *base.TaskMessage)
        done := make(chan struct{})
        defer func() { close(done) }()
        go fakeHeartbeater(starting, finished, done)
        p := newProcessor(processorParams{
            logger:          testLogger,
            broker:          nil,
            retryDelayFunc:  DefaultRetryDelayFunc,
            syncCh:          nil,
            cancelations:    base.NewCancelations(),
            concurrency:     10,
            queues:          tc.queueCfg,
            strictPriority:  false,
            errHandler:      nil,
            shutdownTimeout: defaultShutdownTimeout,
            starting:        starting,
            finished:        finished,
        })
        // Note: rdb and handler not needed for this test.
        p := newProcessorForTest(t, nil, nil)
        p.queueConfig = tc.queueCfg

        got := p.queues()
        if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
            t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s",
@@ -577,6 +593,7 @@ func TestProcessorWithStrictPriority(t *testing.T) {
        logger:          testLogger,
        broker:          rdbClient,
        retryDelayFunc:  DefaultRetryDelayFunc,
        isFailureFunc:   defaultIsFailureFunc,
        syncCh:          syncCh,
        cancelations:    base.NewCancelations(),
        concurrency:     1, // Set concurrency to 1 to make sure tasks are processed one at a time.
@@ -593,13 +610,13 @@ func TestProcessorWithStrictPriority(t *testing.T) {
    time.Sleep(tc.wait)
    // Make sure no tasks are stuck in active list.
    for _, qname := range tc.queues {
        if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
        if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
            t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
        }
    }
    p.shutdown()

    if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
    if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
        t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
    }

@@ -638,12 +655,9 @@ func TestProcessorPerform(t *testing.T) {
        wantErr: true,
    },
}
// Note: We don't need to fully initialize the processor since we are only testing
// Note: We don't need to fully initialized the processor since we are only testing
// perform method.
p := newProcessor(processorParams{
    logger: testLogger,
    queues: defaultQueueConfig,
})
p := newProcessorForTest(t, nil, nil)

for _, tc := range tests {
    p.handler = tc.handler
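Note how the processor now builds each Task with a ResultWriter bound to the task's ID and queue. A minimal handler sketch, assuming the Task.ResultWriter accessor from this release (imports of "context", "fmt", and "github.com/hibiken/asynq" assumed):

    func handleExport(ctx context.Context, t *asynq.Task) error {
        // Result bytes are stored with the completed task and kept for its retention period.
        if _, err := t.ResultWriter().Write([]byte(`{"rows":42}`)); err != nil {
            return fmt.Errorf("could not write result: %v", err)
        }
        return nil
    }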
20
recoverer.go
@@ -5,7 +5,7 @@
package asynq

import (
    "fmt"
    "context"
    "sync"
    "time"

@@ -17,6 +17,7 @@ type recoverer struct {
    logger         *log.Logger
    broker         base.Broker
    retryDelayFunc RetryDelayFunc
    isFailureFunc  func(error) bool

    // channel to communicate back to the long running "recoverer" goroutine.
    done chan struct{}
@@ -34,6 +35,7 @@ type recovererParams struct {
    queues         []string
    interval       time.Duration
    retryDelayFunc RetryDelayFunc
    isFailureFunc  func(error) bool
}

func newRecoverer(params recovererParams) *recoverer {
@@ -44,6 +46,7 @@ func newRecoverer(params recovererParams) *recoverer {
        queues:         params.queues,
        interval:       params.interval,
        retryDelayFunc: params.retryDelayFunc,
        isFailureFunc:  params.isFailureFunc,
    }
}

@@ -81,26 +84,25 @@ func (r *recoverer) recover() {
        r.logger.Warn("recoverer: could not list deadline exceeded tasks")
        return
    }
    const errMsg = "deadline exceeded"
    for _, msg := range msgs {
        if msg.Retried >= msg.Retry {
            r.archive(msg, errMsg)
            r.archive(msg, context.DeadlineExceeded)
        } else {
            r.retry(msg, errMsg)
            r.retry(msg, context.DeadlineExceeded)
        }
    }
}

func (r *recoverer) retry(msg *base.TaskMessage, errMsg string) {
    delay := r.retryDelayFunc(msg.Retried, fmt.Errorf(errMsg), NewTask(msg.Type, msg.Payload))
func (r *recoverer) retry(msg *base.TaskMessage, err error) {
    delay := r.retryDelayFunc(msg.Retried, err, NewTask(msg.Type, msg.Payload))
    retryAt := time.Now().Add(delay)
    if err := r.broker.Retry(msg, retryAt, errMsg); err != nil {
    if err := r.broker.Retry(msg, retryAt, err.Error(), r.isFailureFunc(err)); err != nil {
        r.logger.Warnf("recoverer: could not retry deadline exceeded task: %v", err)
    }
}

func (r *recoverer) archive(msg *base.TaskMessage, errMsg string) {
    if err := r.broker.Archive(msg, errMsg); err != nil {
func (r *recoverer) archive(msg *base.TaskMessage, err error) {
    if err := r.broker.Archive(msg, err.Error()); err != nil {
        r.logger.Warnf("recoverer: could not move task to archive: %v", err)
    }
}
@@ -234,6 +234,7 @@ func TestRecoverer(t *testing.T) {
    queues:         []string{"default", "critical"},
    interval:       1 * time.Second,
    retryDelayFunc: func(n int, err error, task *Task) time.Duration { return 30 * time.Second },
    isFailureFunc:  defaultIsFailureFunc,
})

var wg sync.WaitGroup
@@ -259,7 +260,7 @@ func TestRecoverer(t *testing.T) {
    gotRetry := h.GetRetryMessages(t, r, qname)
    var wantRetry []*base.TaskMessage // Note: construct message here since `LastFailedAt` is relative to each test run
    for _, msg := range msgs {
        wantRetry = append(wantRetry, h.TaskMessageAfterRetry(*msg, "deadline exceeded", runTime))
        wantRetry = append(wantRetry, h.TaskMessageAfterRetry(*msg, "context deadline exceeded", runTime))
    }
    if diff := cmp.Diff(wantRetry, gotRetry, h.SortMsgOpt, cmpOpt); diff != "" {
        t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff)
@@ -269,7 +270,7 @@ func TestRecoverer(t *testing.T) {
    gotArchived := h.GetArchivedMessages(t, r, qname)
    var wantArchived []*base.TaskMessage
    for _, msg := range msgs {
        wantArchived = append(wantArchived, h.TaskMessageWithError(*msg, "deadline exceeded", runTime))
        wantArchived = append(wantArchived, h.TaskMessageWithError(*msg, "context deadline exceeded", runTime))
    }
    if diff := cmp.Diff(wantArchived, gotArchived, h.SortMsgOpt, cmpOpt); diff != "" {
        t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff)
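The recoverer test above wires in a fixed 30-second retryDelayFunc. The same hook is available on a real server through Config.RetryDelayFunc; a minimal sketch, assuming the public API and a local Redis (imports of "time" and "github.com/hibiken/asynq" assumed):

    srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "localhost:6379"}, asynq.Config{
        Concurrency: 10,
        RetryDelayFunc: func(n int, err error, t *asynq.Task) time.Duration {
            // Constant backoff instead of the default exponential one.
            return 30 * time.Second
        },
    })
    _ = srv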
12
scheduler.go
@@ -10,7 +10,7 @@ import (
    "sync"
    "time"

    "github.com/go-redis/redis/v7"
    "github.com/go-redis/redis/v8"
    "github.com/google/uuid"
    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/log"
@@ -19,6 +19,8 @@ import (
)

// A Scheduler kicks off tasks at regular intervals based on the user defined schedule.
//
// Schedulers are safe for concurrent use by multiple goroutines.
type Scheduler struct {
    id    string
    state *base.ServerState
@@ -30,6 +32,9 @@ type Scheduler struct {
    done       chan struct{}
    wg         sync.WaitGroup
    errHandler func(task *Task, opts []Option, err error)

    // guards idmap
    mu sync.Mutex
    // idmap maps Scheduler's entry ID to cron.EntryID
    // to avoid using cron.EntryID as the public API of
    // the Scheduler.
@@ -154,17 +159,22 @@ func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entry
    if err != nil {
        return "", err
    }
    s.mu.Lock()
    s.idmap[job.id.String()] = cronID
    s.mu.Unlock()
    return job.id.String(), nil
}

// Unregister removes a registered entry by entry ID.
// Unregister returns a non-nil error if no entries were found for the given entryID.
func (s *Scheduler) Unregister(entryID string) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    cronID, ok := s.idmap[entryID]
    if !ok {
        return fmt.Errorf("asynq: no scheduler entry found")
    }
    delete(s.idmap, entryID)
    s.cron.Remove(cronID)
    return nil
}
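With the mutex-guarded idmap above, Register and Unregister are safe to call from multiple goroutines. A minimal usage sketch (the cron spec and task type are illustrative; imports of "log" and "github.com/hibiken/asynq" assumed):

    scheduler := asynq.NewScheduler(asynq.RedisClientOpt{Addr: "localhost:6379"}, nil)
    entryID, err := scheduler.Register("@every 30s", asynq.NewTask("metrics:collect", nil))
    if err != nil {
        log.Fatal(err)
    }
    // Later, remove the entry using the ID returned by Register.
    if err := scheduler.Unregister(entryID); err != nil {
        log.Print(err)
    }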
28
server.go
@@ -15,7 +15,7 @@ import (
    "sync"
    "time"

    "github.com/go-redis/redis/v7"
    "github.com/go-redis/redis/v8"
    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/log"
    "github.com/hibiken/asynq/internal/rdb"
@@ -49,6 +49,7 @@ type Server struct {
    subscriber    *subscriber
    recoverer     *recoverer
    healthchecker *healthchecker
    janitor       *janitor
}

// Config specifies the server's background-task processing behavior.
@@ -64,6 +65,14 @@ type Config struct {
    // By default, it uses exponential backoff algorithm to calculate the delay.
    RetryDelayFunc RetryDelayFunc

    // Predicate function to determine whether the error returned from Handler is a failure.
    // If the function returns false, Server will not increment the retried counter for the task,
    // and Server won't record the queue stats (processed and failed stats) to avoid skewing the error
    // rate of the queue.
    //
    // By default, if the given error is non-nil the function returns true.
    IsFailure func(error) bool

    // List of queues to process with given priority value. Keys are the names of the
    // queues and values are associated priority value.
    //
@@ -268,6 +277,8 @@ func DefaultRetryDelayFunc(n int, e error, t *Task) time.Duration {
    return time.Duration(s) * time.Second
}

func defaultIsFailureFunc(err error) bool { return err != nil }

var defaultQueueConfig = map[string]int{
    base.DefaultQueueName: 1,
}
@@ -293,6 +304,10 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
    if delayFunc == nil {
        delayFunc = DefaultRetryDelayFunc
    }
    isFailureFunc := cfg.IsFailure
    if isFailureFunc == nil {
        isFailureFunc = defaultIsFailureFunc
    }
    queues := make(map[string]int)
    for qname, p := range cfg.Queues {
        if err := base.ValidateQueueName(qname); err != nil {
@@ -362,6 +377,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
        logger:         logger,
        broker:         rdb,
        retryDelayFunc: delayFunc,
        isFailureFunc:  isFailureFunc,
        syncCh:         syncCh,
        cancelations:   cancels,
        concurrency:    n,
@@ -376,6 +392,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
        logger:         logger,
        broker:         rdb,
        retryDelayFunc: delayFunc,
        isFailureFunc:  isFailureFunc,
        queues:         qnames,
        interval:       1 * time.Minute,
    })
@@ -385,6 +402,12 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
        interval:        healthcheckInterval,
        healthcheckFunc: cfg.HealthCheckFunc,
    })
    janitor := newJanitor(janitorParams{
        logger:   logger,
        broker:   rdb,
        queues:   qnames,
        interval: 8 * time.Second,
    })
    return &Server{
        logger: logger,
        broker: rdb,
@@ -396,6 +419,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
        subscriber:    subscriber,
        recoverer:     recoverer,
        healthchecker: healthchecker,
        janitor:       janitor,
    }
}

@@ -477,6 +501,7 @@ func (srv *Server) Start(handler Handler) error {
    srv.recoverer.start(&srv.wg)
    srv.forwarder.start(&srv.wg)
    srv.processor.start(&srv.wg)
    srv.janitor.start(&srv.wg)
    return nil
}

@@ -501,6 +526,7 @@ func (srv *Server) Shutdown() {
    srv.recoverer.shutdown()
    srv.syncer.shutdown()
    srv.subscriber.shutdown()
    srv.janitor.shutdown()
    srv.healthchecker.shutdown()
    srv.heartbeater.shutdown()
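A minimal sketch of the IsFailure hook documented above: errors matching a caller-defined sentinel are retried without counting toward the queue's failure stats (the sentinel is hypothetical; imports of "errors" and "github.com/hibiken/asynq" assumed):

    var errUpstreamBusy = errors.New("upstream busy") // hypothetical sentinel

    srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "localhost:6379"}, asynq.Config{
        IsFailure: func(err error) bool {
            // Don't count transient upstream congestion as a task failure.
            return !errors.Is(err, errUpstreamBusy)
        },
    })
    _ = srv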
@@ -19,7 +19,7 @@ import (

func TestServer(t *testing.T) {
    // https://github.com/go-redis/redis/issues/1029
    ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
    ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
    defer goleak.VerifyNoLeaks(t, ignoreOpt)

    redisConnOpt := getRedisConnOpt(t)
@@ -55,7 +55,7 @@ func TestServer(t *testing.T) {

func TestServerRun(t *testing.T) {
    // https://github.com/go-redis/redis/issues/1029
    ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
    ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
    defer goleak.VerifyNoLeaks(t, ignoreOpt)

    srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
@@ -8,7 +8,7 @@ import (
    "sync"
    "time"

    "github.com/go-redis/redis/v7"
    "github.com/go-redis/redis/v8"
    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/log"
)
@@ -63,7 +63,7 @@ func cronList(cmd *cobra.Command, args []string) {
    cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
    printRows := func(w io.Writer, tmpl string) {
        for _, e := range entries {
            fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), formatPayload(e.Task.Payload()), e.Opts,
            fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), sprintBytes(e.Task.Payload()), e.Opts,
                nextEnqueue(e.Next), prevEnqueue(e.Prev))
        }
    }
@@ -1,404 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package cmd

import (
    "encoding/json"
    "fmt"
    "os"
    "strings"
    "time"

    "github.com/go-redis/redis/v7"
    "github.com/google/uuid"
    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/errors"
    "github.com/hibiken/asynq/internal/rdb"
    "github.com/spf13/cobra"
)

// migrateCmd represents the migrate command.
var migrateCmd = &cobra.Command{
    Use:   "migrate",
    Short: fmt.Sprintf("Migrate existing tasks and queues to be asynq%s compatible", base.Version),
    Long: `Migrate (asynq migrate) will migrate existing tasks and queues in redis to be compatible with the latest version of asynq.
`,
    Args: cobra.NoArgs,
    Run:  migrate,
}

func init() {
    rootCmd.AddCommand(migrateCmd)
}

func backupKey(key string) string {
    return fmt.Sprintf("%s:backup", key)
}

func renameKeyAsBackup(c redis.UniversalClient, key string) error {
    if c.Exists(key).Val() == 0 {
        return nil // key doesn't exist; no-op
    }
    return c.Rename(key, backupKey(key)).Err()
}

func failIfError(err error, msg string) {
    if err != nil {
        fmt.Printf("error: %s: %v\n", msg, err)
        fmt.Println("*** Please report this issue at https://github.com/hibiken/asynq/issues ***")
        os.Exit(1)
    }
}

func logIfError(err error, msg string) {
    if err != nil {
        fmt.Printf("warning: %s: %v\n", msg, err)
    }
}

func migrate(cmd *cobra.Command, args []string) {
    r := createRDB()
    queues, err := r.AllQueues()
    failIfError(err, "Failed to get queue names")

    // ---------------------------------------------
    // Pre-check: Ensure no active servers, tasks.
    // ---------------------------------------------
    srvs, err := r.ListServers()
    failIfError(err, "Failed to get server infos")
    if len(srvs) > 0 {
        fmt.Println("(error): Server(s) still running. Please ensure that no asynq servers are running when running migrate command.")
        os.Exit(1)
    }
    for _, qname := range queues {
        stats, err := r.CurrentStats(qname)
        failIfError(err, "Failed to get stats")
        if stats.Active > 0 {
            fmt.Printf("(error): %d active tasks found. Please ensure that no active tasks exist when running migrate command.\n", stats.Active)
            os.Exit(1)
        }
    }

    // ---------------------------------------------
    // Rename pending key
    // ---------------------------------------------
    fmt.Print("Renaming pending keys...")
    for _, qname := range queues {
        oldKey := fmt.Sprintf("asynq:{%s}", qname)
        if r.Client().Exists(oldKey).Val() == 0 {
            continue
        }
        newKey := base.PendingKey(qname)
        err := r.Client().Rename(oldKey, newKey).Err()
        failIfError(err, "Failed to rename key")
    }
    fmt.Print("Done\n")

    // ---------------------------------------------
    // Rename keys as backup
    // ---------------------------------------------
    fmt.Print("Renaming keys for backup...")
    for _, qname := range queues {
        keys := []string{
            base.ActiveKey(qname),
            base.PendingKey(qname),
            base.ScheduledKey(qname),
            base.RetryKey(qname),
            base.ArchivedKey(qname),
        }
        for _, key := range keys {
            err := renameKeyAsBackup(r.Client(), key)
            failIfError(err, fmt.Sprintf("Failed to rename key %q for backup", key))
        }
    }
    fmt.Print("Done\n")

    // ---------------------------------------------
    // Update to new schema
    // ---------------------------------------------
    fmt.Print("Updating to new schema...")
    for _, qname := range queues {
        updatePendingMessages(r, qname)
        updateZSetMessages(r.Client(), base.ScheduledKey(qname), "scheduled")
        updateZSetMessages(r.Client(), base.RetryKey(qname), "retry")
        updateZSetMessages(r.Client(), base.ArchivedKey(qname), "archived")
    }
    fmt.Print("Done\n")

    // ---------------------------------------------
    // Delete backup keys
    // ---------------------------------------------
    fmt.Print("Deleting backup keys...")
    for _, qname := range queues {
        keys := []string{
            backupKey(base.ActiveKey(qname)),
            backupKey(base.PendingKey(qname)),
            backupKey(base.ScheduledKey(qname)),
            backupKey(base.RetryKey(qname)),
            backupKey(base.ArchivedKey(qname)),
        }
        for _, key := range keys {
            err := r.Client().Del(key).Err()
            failIfError(err, "Failed to delete backup key")
        }
    }
    fmt.Print("Done\n")
}

func UnmarshalOldMessage(encoded string) (*base.TaskMessage, error) {
    oldMsg, err := DecodeMessage(encoded)
    if err != nil {
        return nil, err
    }
    payload, err := json.Marshal(oldMsg.Payload)
    if err != nil {
        return nil, fmt.Errorf("could not marshal payload: %v", err)
    }
    return &base.TaskMessage{
        Type:         oldMsg.Type,
        Payload:      payload,
        ID:           oldMsg.ID,
        Queue:        oldMsg.Queue,
        Retry:        oldMsg.Retry,
        Retried:      oldMsg.Retried,
        ErrorMsg:     oldMsg.ErrorMsg,
        LastFailedAt: 0,
        Timeout:      oldMsg.Timeout,
        Deadline:     oldMsg.Deadline,
        UniqueKey:    oldMsg.UniqueKey,
    }, nil
}

// TaskMessage from v0.17
type OldTaskMessage struct {
    // Type indicates the kind of the task to be performed.
    Type string

    // Payload holds data needed to process the task.
    Payload map[string]interface{}

    // ID is a unique identifier for each task.
    ID uuid.UUID

    // Queue is a name this message should be enqueued to.
    Queue string

    // Retry is the max number of retry for this task.
    Retry int

    // Retried is the number of times we've retried this task so far.
    Retried int

    // ErrorMsg holds the error message from the last failure.
    ErrorMsg string

    // Timeout specifies timeout in seconds.
    // If task processing doesn't complete within the timeout, the task will be retried
    // if retry count is remaining. Otherwise it will be moved to the archive.
    //
    // Use zero to indicate no timeout.
    Timeout int64

    // Deadline specifies the deadline for the task in Unix time,
    // the number of seconds elapsed since January 1, 1970 UTC.
    // If task processing doesn't complete before the deadline, the task will be retried
    // if retry count is remaining. Otherwise it will be moved to the archive.
    //
    // Use zero to indicate no deadline.
    Deadline int64

    // UniqueKey holds the redis key used for uniqueness lock for this task.
    //
    // Empty string indicates that no uniqueness lock was used.
    UniqueKey string
}

// DecodeMessage unmarshals the given encoded string and returns a decoded task message.
// Code from v0.17.
func DecodeMessage(s string) (*OldTaskMessage, error) {
    d := json.NewDecoder(strings.NewReader(s))
    d.UseNumber()
    var msg OldTaskMessage
    if err := d.Decode(&msg); err != nil {
        return nil, err
    }
    return &msg, nil
}

func updatePendingMessages(r *rdb.RDB, qname string) {
    data, err := r.Client().LRange(backupKey(base.PendingKey(qname)), 0, -1).Result()
    failIfError(err, "Failed to read backup pending key")

    for _, s := range data {
        msg, err := UnmarshalOldMessage(s)
        failIfError(err, "Failed to unmarshal message")

        if msg.UniqueKey != "" {
            ttl, err := r.Client().TTL(msg.UniqueKey).Result()
            failIfError(err, "Failed to get ttl")

            if ttl > 0 {
                err = r.Client().Del(msg.UniqueKey).Err()
                logIfError(err, "Failed to delete unique key")
            }

            // Regenerate unique key.
            msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
            if ttl > 0 {
                err = r.EnqueueUnique(msg, ttl)
            } else {
                err = r.Enqueue(msg)
            }
            failIfError(err, "Failed to enqueue message")

        } else {
            err := r.Enqueue(msg)
            failIfError(err, "Failed to enqueue message")
        }
    }
}

// KEYS[1] -> asynq:{<qname>}:t:<task_id>
// KEYS[2] -> asynq:{<qname>}:scheduled
// ARGV[1] -> task message data
// ARGV[2] -> zset score
// ARGV[3] -> task ID
// ARGV[4] -> task timeout in seconds (0 if not timeout)
// ARGV[5] -> task deadline in unix time (0 if no deadline)
// ARGV[6] -> task state (e.g. "retry", "archived")
var taskZAddCmd = redis.NewScript(`
redis.call("HSET", KEYS[1],
           "msg", ARGV[1],
           "state", ARGV[6],
           "timeout", ARGV[4],
           "deadline", ARGV[5])
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[3])
return 1
`)

// ZAddTask adds task to zset.
func ZAddTask(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string) error {
    // Special case; LastFailedAt field is new so assign a value inferred from zscore.
    if state == "archived" {
        msg.LastFailedAt = int64(score)
    }

    encoded, err := base.EncodeMessage(msg)
    if err != nil {
        return err
    }
    if err := c.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
        return err
    }
    keys := []string{
        base.TaskKey(msg.Queue, msg.ID.String()),
        key,
    }
    argv := []interface{}{
        encoded,
        score,
        msg.ID.String(),
        msg.Timeout,
        msg.Deadline,
        state,
    }
    return taskZAddCmd.Run(c, keys, argv...).Err()
}

// KEYS[1] -> unique key
// KEYS[2] -> asynq:{<qname>}:t:<task_id>
// KEYS[3] -> zset key (e.g. asynq:{<qname>}:scheduled)
// --
// ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> score (process_at timestamp)
// ARGV[4] -> task message
// ARGV[5] -> task timeout in seconds (0 if not timeout)
// ARGV[6] -> task deadline in unix time (0 if no deadline)
// ARGV[7] -> task state (oneof "scheduled", "retry", "archived")
var taskZAddUniqueCmd = redis.NewScript(`
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
if not ok then
  return 0
end
redis.call("HSET", KEYS[2],
           "msg", ARGV[4],
           "state", ARGV[7],
           "timeout", ARGV[5],
           "deadline", ARGV[6],
           "unique_key", KEYS[1])
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1])
return 1
`)

// ScheduleUnique adds the task to the backlog queue to be processed in the future if the uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired.
func ZAddTaskUnique(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string, ttl time.Duration) error {
    encoded, err := base.EncodeMessage(msg)
    if err != nil {
        return err
    }
    if err := c.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
        return err
    }
    keys := []string{
        msg.UniqueKey,
        base.TaskKey(msg.Queue, msg.ID.String()),
        key,
    }
    argv := []interface{}{
        msg.ID.String(),
        int(ttl.Seconds()),
        score,
        encoded,
        msg.Timeout,
        msg.Deadline,
        state,
    }
    res, err := taskZAddUniqueCmd.Run(c, keys, argv...).Result()
    if err != nil {
        return err
    }
    n, ok := res.(int64)
    if !ok {
        return errors.E(errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
    }
    if n == 0 {
        return errors.E(errors.AlreadyExists, errors.ErrDuplicateTask)
    }
    return nil
}

func updateZSetMessages(c redis.UniversalClient, key, state string) {
    zs, err := c.ZRangeWithScores(backupKey(key), 0, -1).Result()
    failIfError(err, "Failed to read")

    for _, z := range zs {
        msg, err := UnmarshalOldMessage(z.Member.(string))
        failIfError(err, "Failed to unmarshal message")

        if msg.UniqueKey != "" {
            ttl, err := c.TTL(msg.UniqueKey).Result()
            failIfError(err, "Failed to get ttl")

            if ttl > 0 {
                err = c.Del(msg.UniqueKey).Err()
                logIfError(err, "Failed to delete unique key")
            }

            // Regenerate unique key.
            msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
            if ttl > 0 {
                err = ZAddTaskUnique(c, key, msg, z.Score, state, ttl)
            } else {
                err = ZAddTask(c, key, msg, z.Score, state)
            }
            failIfError(err, "Failed to zadd message")
        } else {
            err := ZAddTask(c, key, msg, z.Score, state)
            failIfError(err, "Failed to enqueue scheduled message")
        }
    }
}
@@ -148,9 +148,9 @@ func printQueueInfo(info *asynq.QueueInfo) {
    fmt.Printf("Paused: %t\n\n", info.Paused)
    bold.Println("Task Count by State")
    printTable(
        []string{"active", "pending", "scheduled", "retry", "archived"},
        []string{"active", "pending", "scheduled", "retry", "archived", "completed"},
        func(w io.Writer, tmpl string) {
            fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived)
            fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived, info.Completed)
        },
    )
    fmt.Println()
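The completed column printed above comes from the queue info API. A minimal sketch, assuming Inspector.GetQueueInfo from the same release (imports of "fmt", "log", and "github.com/hibiken/asynq" assumed):

    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    info, err := insp.GetQueueInfo("default")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("completed tasks in %q: %d\n", info.Queue, info.Completed)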
@@ -14,7 +14,7 @@ import (
    "unicode"
    "unicode/utf8"

    "github.com/go-redis/redis/v7"
    "github.com/go-redis/redis/v8"
    "github.com/hibiken/asynq"
    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/rdb"
@@ -199,9 +199,9 @@ func printTable(cols []string, printRows func(w io.Writer, tmpl string)) {
    tw.Flush()
}

// formatPayload returns string representation of payload if data is printable.
// If data is not printable, it returns a string describing payload is not printable.
func formatPayload(payload []byte) string {
// sprintBytes returns a string representation of the given byte slice if data is printable.
// If data is not printable, it returns a string describing it is not printable.
func sprintBytes(payload []byte) string {
    if !isPrintable(payload) {
        return "non-printable bytes"
    }
@@ -7,11 +7,13 @@ package cmd
import (
    "fmt"
    "io"
    "math"
    "os"
    "strconv"
    "strings"
    "text/tabwriter"
    "time"
    "unicode/utf8"

    "github.com/fatih/color"
    "github.com/hibiken/asynq/internal/rdb"
@@ -58,6 +60,7 @@ type AggregateStats struct {
    Scheduled int
    Retry     int
    Archived  int
    Completed int
    Processed int
    Failed    int
    Timestamp time.Time
@@ -85,6 +88,7 @@ func stats(cmd *cobra.Command, args []string) {
    aggStats.Scheduled += s.Scheduled
    aggStats.Retry += s.Retry
    aggStats.Archived += s.Archived
    aggStats.Completed += s.Completed
    aggStats.Processed += s.Processed
    aggStats.Failed += s.Failed
    aggStats.Timestamp = s.Timestamp
@@ -124,22 +128,50 @@ func stats(cmd *cobra.Command, args []string) {
}

func printStatsByState(s *AggregateStats) {
    format := strings.Repeat("%v\t", 5) + "\n"
    format := strings.Repeat("%v\t", 6) + "\n"
    tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
    fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived")
    fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----")
    fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived)
    fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived", "completed")
    width := maxInt(9 /* defaultWidth */, maxWidthOf(s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)) // length of widest column
    sep := strings.Repeat("-", width)
    fmt.Fprintf(tw, format, sep, sep, sep, sep, sep, sep)
    fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)
    tw.Flush()
}

// numDigits returns the number of digits in n.
func numDigits(n int) int {
    return len(strconv.Itoa(n))
}

// maxWidthOf returns the max number of digits among the provided vals.
func maxWidthOf(vals ...int) int {
    max := 0
    for _, v := range vals {
        if vw := numDigits(v); vw > max {
            max = vw
        }
    }
    return max
}

func maxInt(a, b int) int {
    return int(math.Max(float64(a), float64(b)))
}

func printStatsByQueue(stats []*rdb.Stats) {
    var headers, seps, counts []string
    maxHeaderWidth := 0
    for _, s := range stats {
        title := queueTitle(s)
        headers = append(headers, title)
        seps = append(seps, strings.Repeat("-", len(title)))
        if w := utf8.RuneCountInString(title); w > maxHeaderWidth {
            maxHeaderWidth = w
        }
        counts = append(counts, strconv.Itoa(s.Size))
    }
    for i := 0; i < len(headers); i++ {
        seps = append(seps, strings.Repeat("-", maxHeaderWidth))
    }
    format := strings.Repeat("%v\t", len(headers)) + "\n"
    tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
    fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)
@@ -86,6 +86,7 @@ The value for the state flag should be one of:
- scheduled
- retry
- archived
- completed

List operation paginates the result set.
By default, the command fetches the first 30 tasks.
@@ -189,6 +190,8 @@ func taskList(cmd *cobra.Command, args []string) {
    listRetryTasks(qname, pageNum, pageSize)
case "archived":
    listArchivedTasks(qname, pageNum, pageSize)
case "completed":
    listCompletedTasks(qname, pageNum, pageSize)
default:
    fmt.Printf("error: state=%q is not supported\n", state)
    os.Exit(1)
@@ -210,7 +213,7 @@ func listActiveTasks(qname string, pageNum, pageSize int) {
    []string{"ID", "Type", "Payload"},
    func(w io.Writer, tmpl string) {
        for _, t := range tasks {
            fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload))
            fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
        }
    },
)
@@ -231,7 +234,7 @@ func listPendingTasks(qname string, pageNum, pageSize int) {
    []string{"ID", "Type", "Payload"},
    func(w io.Writer, tmpl string) {
        for _, t := range tasks {
            fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload))
            fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
        }
    },
)
@@ -252,7 +255,7 @@ func listScheduledTasks(qname string, pageNum, pageSize int) {
    []string{"ID", "Type", "Payload", "Process In"},
    func(w io.Writer, tmpl string) {
        for _, t := range tasks {
            fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt))
            fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt))
        }
    },
)
@@ -284,8 +287,8 @@ func listRetryTasks(qname string, pageNum, pageSize int) {
    []string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Last Failed", "Retried", "Max Retry"},
    func(w io.Writer, tmpl string) {
        for _, t := range tasks {
            fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt),
                t.LastErr, formatLastFailedAt(t.LastFailedAt), t.Retried, t.MaxRetry)
            fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt),
                t.LastErr, formatPastTime(t.LastFailedAt), t.Retried, t.MaxRetry)
        }
    },
)
@@ -306,7 +309,27 @@ func listArchivedTasks(qname string, pageNum, pageSize int) {
    []string{"ID", "Type", "Payload", "Last Failed", "Last Error"},
    func(w io.Writer, tmpl string) {
        for _, t := range tasks {
            fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatLastFailedAt(t.LastFailedAt), t.LastErr)
            fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.LastFailedAt), t.LastErr)
        }
    })
}

func listCompletedTasks(qname string, pageNum, pageSize int) {
    i := createInspector()
    tasks, err := i.ListCompletedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    if len(tasks) == 0 {
        fmt.Printf("No completed tasks in %q queue\n", qname)
        return
    }
    printTable(
        []string{"ID", "Type", "Payload", "CompletedAt", "Result"},
        func(w io.Writer, tmpl string) {
            for _, t := range tasks {
                fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.CompletedAt), sprintBytes(t.Result))
            }
        })
}
@@ -356,7 +379,7 @@ func printTaskInfo(info *asynq.TaskInfo) {
    if len(info.LastErr) != 0 {
        fmt.Println()
        bold.Println("Last Failure")
        fmt.Printf("Failed at:     %s\n", formatLastFailedAt(info.LastFailedAt))
        fmt.Printf("Failed at:     %s\n", formatPastTime(info.LastFailedAt))
        fmt.Printf("Error message: %s\n", info.LastErr)
    }
}
@@ -371,11 +394,12 @@ func formatNextProcessAt(processAt time.Time) string {
    return fmt.Sprintf("%s (in %v)", processAt.Format(time.UnixDate), processAt.Sub(time.Now()).Round(time.Second))
}

func formatLastFailedAt(lastFailedAt time.Time) string {
    if lastFailedAt.IsZero() || lastFailedAt.Unix() == 0 {
// formatPastTime takes t which is time in the past and returns a user-friendly string.
func formatPastTime(t time.Time) string {
    if t.IsZero() || t.Unix() == 0 {
        return ""
    }
    return lastFailedAt.Format(time.UnixDate)
    return t.Format(time.UnixDate)
}

func taskArchive(cmd *cobra.Command, args []string) {
@@ -496,6 +520,8 @@ func taskDeleteAll(cmd *cobra.Command, args []string) {
    n, err = i.DeleteAllRetryTasks(qname)
case "archived":
    n, err = i.DeleteAllArchivedTasks(qname)
case "completed":
    n, err = i.DeleteAllCompletedTasks(qname)
default:
    fmt.Printf("error: unsupported state %q\n", state)
    os.Exit(1)
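listCompletedTasks above is a thin wrapper over the inspector. Calling the same API directly from Go might look like this sketch (queue name and page values are illustrative; imports of "fmt", "log", and "github.com/hibiken/asynq" assumed):

    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    tasks, err := insp.ListCompletedTasks("default", asynq.PageSize(30), asynq.Page(1))
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range tasks {
        fmt.Println(t.ID, t.Type, t.CompletedAt)
    }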
10
tools/go.mod
@@ -3,21 +3,13 @@ module github.com/hibiken/asynq/tools
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible // indirect
|
||||
github.com/cpuguy83/go-md2man v1.0.10 // indirect
|
||||
github.com/fatih/color v1.9.0
|
||||
github.com/go-redis/redis/v7 v7.4.0
|
||||
github.com/golang/protobuf v1.4.1 // indirect
|
||||
github.com/go-redis/redis/v8 v8.11.2
|
||||
github.com/google/uuid v1.2.0
|
||||
github.com/hibiken/asynq v0.17.1
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/spf13/cast v1.3.1
|
||||
github.com/spf13/cobra v1.1.1
|
||||
github.com/spf13/viper v1.7.0
|
||||
github.com/ugorji/go v1.1.4 // indirect
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 // indirect
|
||||
)
|
||||
|
||||
replace github.com/hibiken/asynq => ./..
|
||||
|
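The substantive change here is the bump from go-redis/redis/v7 to v8; the other removed lines are indirect dependencies that fell out of the tidied module graph. In v8 every Redis command takes a context.Context as its first argument, which is why the new x/rate package below threads ctx through each call. A minimal sketch of the call-site migration, assuming a local Redis on the default port:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	ctx := context.Background()
	// v7: rdb.Set("key", "value", 0); v8 adds the ctx argument.
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	val, err := rdb.Get(ctx, "key").Result()
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("key =", val)
}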
105
tools/go.sum
@@ -18,7 +18,6 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -26,39 +25,39 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v8 v8.11.2 h1:WqlSpAwz8mxDSMCvbyz1Mkiqe0LE5OY4j3lgkvu1Ts0=
github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -69,36 +68,34 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
@@ -124,7 +121,6 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -143,7 +139,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -168,12 +163,17 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -196,7 +196,6 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
@@ -213,49 +212,41 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -274,6 +265,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -287,11 +279,12 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -300,6 +293,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -307,7 +301,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -316,14 +309,19 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -345,9 +343,13 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -369,7 +371,6 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -379,6 +380,7 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
@@ -388,7 +390,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -397,12 +398,12 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
40
x/rate/example_test.go
Normal file
@@ -0,0 +1,40 @@
package rate_test

import (
	"context"
	"fmt"
	"time"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/x/rate"
)

type RateLimitError struct {
	RetryIn time.Duration
}

func (e *RateLimitError) Error() string {
	return fmt.Sprintf("rate limited (retry in %v)", e.RetryIn)
}

func ExampleNewSemaphore() {
	redisConnOpt := asynq.RedisClientOpt{Addr: ":6379"}
	sema := rate.NewSemaphore(redisConnOpt, "my_queue", 10)
	// call sema.Close() when appropriate

	_ = asynq.HandlerFunc(func(ctx context.Context, task *asynq.Task) error {
		ok, err := sema.Acquire(ctx)
		if err != nil {
			return err
		}
		if !ok {
			return &RateLimitError{RetryIn: 30 * time.Second}
		}

		// Make sure to release the token once we're done.
		defer sema.Release(ctx)

		// Process task
		return nil
	})
}
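The example returns a RateLimitError instead of blocking when no token is available. In a real worker you would typically pair this with the server's retry hooks so a rate-limited run is retried after RetryIn without burning the task's retry count. The sketch below assumes asynq's Config fields IsFailure and RetryDelayFunc and the exported DefaultRetryDelayFunc (present in recent asynq releases); treat it as a wiring illustration, not part of this change:

package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/x/rate"
)

type RateLimitError struct{ RetryIn time.Duration }

func (e *RateLimitError) Error() string {
	return "rate limited (retry in " + e.RetryIn.String() + ")"
}

func main() {
	redisConnOpt := asynq.RedisClientOpt{Addr: ":6379"}
	sema := rate.NewSemaphore(redisConnOpt, "my_queue", 10)
	defer sema.Close()

	srv := asynq.NewServer(redisConnOpt, asynq.Config{
		Concurrency: 20,
		// Don't count a rate-limited run as a failure (assumed Config field).
		IsFailure: func(err error) bool {
			var rle *RateLimitError
			return !errors.As(err, &rle)
		},
		// Retry after the duration suggested by the limiter.
		RetryDelayFunc: func(n int, err error, t *asynq.Task) time.Duration {
			var rle *RateLimitError
			if errors.As(err, &rle) {
				return rle.RetryIn
			}
			return asynq.DefaultRetryDelayFunc(n, err, t)
		},
	})

	mux := asynq.NewServeMux()
	mux.HandleFunc("my_task", func(ctx context.Context, task *asynq.Task) error {
		ok, err := sema.Acquire(ctx)
		if err != nil {
			return err
		}
		if !ok {
			return &RateLimitError{RetryIn: 30 * time.Second}
		}
		defer sema.Release(ctx)
		// ... process the task ...
		return nil
	})

	if err := srv.Run(mux); err != nil {
		log.Fatal(err)
	}
}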
114
x/rate/semaphore.go
Normal file
@@ -0,0 +1,114 @@
// Package rate contains rate limiting strategies for asynq.Handler(s).
package rate

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/hibiken/asynq"
	asynqcontext "github.com/hibiken/asynq/internal/context"
)

// NewSemaphore creates a counting Semaphore for the given scope with the given number of tokens.
func NewSemaphore(rco asynq.RedisConnOpt, scope string, maxTokens int) *Semaphore {
	rc, ok := rco.MakeRedisClient().(redis.UniversalClient)
	if !ok {
		panic(fmt.Sprintf("rate.NewSemaphore: unsupported RedisConnOpt type %T", rco))
	}

	if maxTokens < 1 {
		panic("rate.NewSemaphore: maxTokens cannot be less than 1")
	}

	if len(strings.TrimSpace(scope)) == 0 {
		panic("rate.NewSemaphore: scope should not be empty")
	}

	return &Semaphore{
		rc:        rc,
		scope:     scope,
		maxTokens: maxTokens,
	}
}

// Semaphore is a distributed counting semaphore which can be used to set maxTokens across multiple asynq servers.
type Semaphore struct {
	rc        redis.UniversalClient
	maxTokens int
	scope     string
}

// KEYS[1] -> asynq:sema:<scope>
// ARGV[1] -> max concurrency
// ARGV[2] -> current time in unix time
// ARGV[3] -> deadline in unix time
// ARGV[4] -> task ID
var acquireCmd = redis.NewScript(`
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", tonumber(ARGV[2])-1)
local count = redis.call("ZCARD", KEYS[1])

if (count < tonumber(ARGV[1])) then
	redis.call("ZADD", KEYS[1], ARGV[3], ARGV[4])
	return 'true'
else
	return 'false'
end
`)

// Acquire attempts to acquire a token from the semaphore.
// - Returns (true, nil) iff the current token count is less than maxTokens
// - Returns (false, nil) when a token cannot be acquired
// - Returns (false, error) otherwise
//
// The context.Context passed to Acquire must have a deadline set;
// this ensures that the token is released if the job goroutine crashes and does not call Release.
func (s *Semaphore) Acquire(ctx context.Context) (bool, error) {
	d, ok := ctx.Deadline()
	if !ok {
		return false, fmt.Errorf("provided context must have a deadline")
	}

	taskID, ok := asynqcontext.GetTaskID(ctx)
	if !ok {
		return false, fmt.Errorf("provided context is missing task ID value")
	}

	return acquireCmd.Run(ctx, s.rc,
		[]string{semaphoreKey(s.scope)},
		s.maxTokens,
		time.Now().Unix(),
		d.Unix(),
		taskID,
	).Bool()
}

// Release will release the token on the counting semaphore.
func (s *Semaphore) Release(ctx context.Context) error {
	taskID, ok := asynqcontext.GetTaskID(ctx)
	if !ok {
		return fmt.Errorf("provided context is missing task ID value")
	}

	n, err := s.rc.ZRem(ctx, semaphoreKey(s.scope), taskID).Result()
	if err != nil {
		return fmt.Errorf("redis command failed: %v", err)
	}

	if n == 0 {
		return fmt.Errorf("no token found for task %q", taskID)
	}

	return nil
}

// Close closes the connection to redis.
func (s *Semaphore) Close() error {
	return s.rc.Close()
}

func semaphoreKey(scope string) string {
	return fmt.Sprintf("asynq:sema:%s", scope)
}
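To make the acquire semantics concrete: a semaphore is a Redis sorted set keyed by asynq:sema:<scope> whose members are task IDs and whose scores are the holders' context deadlines; that is what lets a token held by a crashed worker be reclaimed once its deadline passes (see the stale-token test below). The Lua script runs its three steps atomically. The non-atomic equivalent below is illustration only: two workers interleaving these round-trips could both pass the count check and overshoot maxTokens, which is exactly why the script exists.

package rate

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

// acquireNonAtomic mirrors acquireCmd step by step, without atomicity.
// Illustration only; do not use in place of the Lua script.
func acquireNonAtomic(ctx context.Context, rc redis.UniversalClient, scope, taskID string, maxTokens int, deadline time.Time) (bool, error) {
	key := fmt.Sprintf("asynq:sema:%s", scope)
	// 1. Reclaim stale tokens: drop members whose deadline has passed.
	if err := rc.ZRemRangeByScore(ctx, key, "-inf", fmt.Sprintf("%d", time.Now().Unix()-1)).Err(); err != nil {
		return false, err
	}
	// 2. Count the live tokens.
	count, err := rc.ZCard(ctx, key).Result()
	if err != nil {
		return false, err
	}
	if count >= int64(maxTokens) {
		return false, nil
	}
	// 3. Take a token, scored with our own deadline so it too can be reclaimed.
	return true, rc.ZAdd(ctx, key, &redis.Z{Score: float64(deadline.Unix()), Member: taskID}).Err()
}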
408
x/rate/semaphore_test.go
Normal file
@@ -0,0 +1,408 @@
package rate

import (
	"context"
	"flag"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/google/uuid"
	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/internal/base"
	asynqcontext "github.com/hibiken/asynq/internal/context"
)

var (
	redisAddr string
	redisDB   int

	useRedisCluster   bool
	redisClusterAddrs string // comma-separated list of host:port
)

func init() {
	flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing")
	flag.IntVar(&redisDB, "redis_db", 14, "redis db number to use in testing")
	flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing")
	flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses")
}

func TestNewSemaphore(t *testing.T) {
	tests := []struct {
		desc           string
		name           string
		maxConcurrency int
		wantPanic      string
		connOpt        asynq.RedisConnOpt
	}{
		{
			desc:      "Bad RedisConnOpt",
			wantPanic: "rate.NewSemaphore: unsupported RedisConnOpt type *rate.badConnOpt",
			connOpt:   &badConnOpt{},
		},
		{
			desc:      "Zero maxTokens should panic",
			wantPanic: "rate.NewSemaphore: maxTokens cannot be less than 1",
		},
		{
			desc:           "Empty scope should panic",
			maxConcurrency: 2,
			name:           " ",
			wantPanic:      "rate.NewSemaphore: scope should not be empty",
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			if tt.wantPanic != "" {
				defer func() {
					if r := recover(); r.(string) != tt.wantPanic {
						t.Errorf("%s;\nNewSemaphore should panic with msg: %s, got %s", tt.desc, tt.wantPanic, r.(string))
					}
				}()
			}

			opt := tt.connOpt
			if tt.connOpt == nil {
				opt = getRedisConnOpt(t)
			}

			sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
			defer sema.Close()
		})
	}
}

func TestNewSemaphore_Acquire(t *testing.T) {
	tests := []struct {
		desc           string
		name           string
		maxConcurrency int
		taskIDs        []string
		ctxFunc        func(string) (context.Context, context.CancelFunc)
		want           []bool
	}{
		{
			desc:           "Should acquire token when current token count is less than maxTokens",
			name:           "task-1",
			maxConcurrency: 3,
			taskIDs:        []string{uuid.NewString(), uuid.NewString()},
			ctxFunc: func(id string) (context.Context, context.CancelFunc) {
				return asynqcontext.New(&base.TaskMessage{
					ID:    id,
					Queue: "task-1",
				}, time.Now().Add(time.Second))
			},
			want: []bool{true, true},
		},
		{
			desc:           "Should fail acquiring token when current token count is equal to maxTokens",
			name:           "task-2",
			maxConcurrency: 3,
			taskIDs:        []string{uuid.NewString(), uuid.NewString(), uuid.NewString(), uuid.NewString()},
			ctxFunc: func(id string) (context.Context, context.CancelFunc) {
				return asynqcontext.New(&base.TaskMessage{
					ID:    id,
					Queue: "task-2",
				}, time.Now().Add(time.Second))
			},
			want: []bool{true, true, true, false},
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			opt := getRedisConnOpt(t)
			rc := opt.MakeRedisClient().(redis.UniversalClient)
			defer rc.Close()

			if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
			}

			sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
			defer sema.Close()

			for i := 0; i < len(tt.taskIDs); i++ {
				ctx, cancel := tt.ctxFunc(tt.taskIDs[i])

				got, err := sema.Acquire(ctx)
				if err != nil {
					t.Errorf("%s;\nSemaphore.Acquire() got error %v", tt.desc, err)
				}

				if got != tt.want[i] {
					t.Errorf("%s;\nSemaphore.Acquire(ctx) returned %v, want %v", tt.desc, got, tt.want[i])
				}

				cancel()
			}
		})
	}
}

func TestNewSemaphore_Acquire_Error(t *testing.T) {
	tests := []struct {
		desc           string
		name           string
		maxConcurrency int
		taskIDs        []string
		ctxFunc        func(string) (context.Context, context.CancelFunc)
		errStr         string
	}{
		{
			desc:           "Should return error if context has no deadline",
			name:           "task-3",
			maxConcurrency: 1,
			taskIDs:        []string{uuid.NewString(), uuid.NewString()},
			ctxFunc: func(id string) (context.Context, context.CancelFunc) {
				return context.Background(), func() {}
			},
			errStr: "provided context must have a deadline",
		},
		{
			desc:           "Should return error when context is missing taskID",
			name:           "task-4",
			maxConcurrency: 1,
			taskIDs:        []string{uuid.NewString()},
			ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
				return context.WithTimeout(context.Background(), time.Second)
			},
			errStr: "provided context is missing task ID value",
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			opt := getRedisConnOpt(t)
			rc := opt.MakeRedisClient().(redis.UniversalClient)
			defer rc.Close()

			if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
			}

			sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
			defer sema.Close()

			for i := 0; i < len(tt.taskIDs); i++ {
				ctx, cancel := tt.ctxFunc(tt.taskIDs[i])

				_, err := sema.Acquire(ctx)
				if err == nil || err.Error() != tt.errStr {
					t.Errorf("%s;\nSemaphore.Acquire() got error %v want error %v", tt.desc, err, tt.errStr)
				}

				cancel()
			}
		})
	}
}

func TestNewSemaphore_Acquire_StaleToken(t *testing.T) {
	opt := getRedisConnOpt(t)
	rc := opt.MakeRedisClient().(redis.UniversalClient)
	defer rc.Close()

	taskID := uuid.NewString()

	// adding a set member to mimic the case where token is acquired but the goroutine crashed,
	// in which case, the token will not be explicitly removed and should be present already
	rc.ZAdd(context.Background(), semaphoreKey("stale-token"), &redis.Z{
		Score:  float64(time.Now().Add(-10 * time.Second).Unix()),
		Member: taskID,
	})

	sema := NewSemaphore(opt, "stale-token", 1)
	defer sema.Close()

	ctx, cancel := asynqcontext.New(&base.TaskMessage{
		ID:    taskID,
		Queue: "task-1",
	}, time.Now().Add(time.Second))
	defer cancel()

	got, err := sema.Acquire(ctx)
	if err != nil {
		t.Errorf("Acquire_StaleToken;\nSemaphore.Acquire() got error %v", err)
	}

	if !got {
		t.Error("Acquire_StaleToken;\nSemaphore.Acquire() got false want true")
	}
}

func TestNewSemaphore_Release(t *testing.T) {
	tests := []struct {
		desc      string
		name      string
		taskIDs   []string
		ctxFunc   func(string) (context.Context, context.CancelFunc)
		wantCount int64
	}{
		{
			desc:    "Should decrease token count",
			name:    "task-5",
			taskIDs: []string{uuid.NewString()},
			ctxFunc: func(id string) (context.Context, context.CancelFunc) {
				return asynqcontext.New(&base.TaskMessage{
					ID:    id,
					Queue: "task-3",
				}, time.Now().Add(time.Second))
			},
		},
		{
			desc:    "Should decrease token count by 2",
			name:    "task-6",
			taskIDs: []string{uuid.NewString(), uuid.NewString()},
			ctxFunc: func(id string) (context.Context, context.CancelFunc) {
				return asynqcontext.New(&base.TaskMessage{
					ID:    id,
					Queue: "task-4",
				}, time.Now().Add(time.Second))
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			opt := getRedisConnOpt(t)
			rc := opt.MakeRedisClient().(redis.UniversalClient)
			defer rc.Close()

			if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
			}

			var members []*redis.Z
			for i := 0; i < len(tt.taskIDs); i++ {
				members = append(members, &redis.Z{
					Score:  float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
					Member: tt.taskIDs[i],
				})
			}
			if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.ZAdd() got error %v", tt.desc, err)
			}

			sema := NewSemaphore(opt, tt.name, 3)
			defer sema.Close()

			for i := 0; i < len(tt.taskIDs); i++ {
				ctx, cancel := tt.ctxFunc(tt.taskIDs[i])

				if err := sema.Release(ctx); err != nil {
					t.Errorf("%s;\nSemaphore.Release() got error %v", tt.desc, err)
				}

				cancel()
			}

			i, err := rc.ZCount(context.Background(), semaphoreKey(tt.name), "-inf", "+inf").Result()
			if err != nil {
				t.Errorf("%s;\nredis.UniversalClient.ZCount() got error %v", tt.desc, err)
			}

			if i != tt.wantCount {
				t.Errorf("%s;\nSemaphore.Release(ctx) didn't release token, got %v want 0", tt.desc, i)
			}
		})
	}
}

func TestNewSemaphore_Release_Error(t *testing.T) {
	testID := uuid.NewString()

	tests := []struct {
		desc    string
		name    string
		taskIDs []string
		ctxFunc func(string) (context.Context, context.CancelFunc)
		errStr  string
	}{
		{
			desc:    "Should return error when context is missing taskID",
			name:    "task-7",
			taskIDs: []string{uuid.NewString()},
			ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
				return context.WithTimeout(context.Background(), time.Second)
			},
			errStr: "provided context is missing task ID value",
		},
		{
			desc:    "Should return error when context has taskID which never acquired token",
			name:    "task-8",
			taskIDs: []string{uuid.NewString()},
			ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
				return asynqcontext.New(&base.TaskMessage{
					ID:    testID,
					Queue: "task-4",
				}, time.Now().Add(time.Second))
			},
			errStr: fmt.Sprintf("no token found for task %q", testID),
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			opt := getRedisConnOpt(t)
			rc := opt.MakeRedisClient().(redis.UniversalClient)
			defer rc.Close()

			if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
			}

			var members []*redis.Z
			for i := 0; i < len(tt.taskIDs); i++ {
				members = append(members, &redis.Z{
					Score:  float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
					Member: tt.taskIDs[i],
				})
			}
			if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.ZAdd() got error %v", tt.desc, err)
			}

			sema := NewSemaphore(opt, tt.name, 3)
			defer sema.Close()

			for i := 0; i < len(tt.taskIDs); i++ {
				ctx, cancel := tt.ctxFunc(tt.taskIDs[i])

				if err := sema.Release(ctx); err == nil || err.Error() != tt.errStr {
					t.Errorf("%s;\nSemaphore.Release() got error %v want error %v", tt.desc, err, tt.errStr)
				}

				cancel()
			}
		})
	}
}

func getRedisConnOpt(tb testing.TB) asynq.RedisConnOpt {
	tb.Helper()
	if useRedisCluster {
		addrs := strings.Split(redisClusterAddrs, ",")
		if len(addrs) == 0 {
			tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
		}
		return asynq.RedisClusterClientOpt{
			Addrs: addrs,
		}
	}
	return asynq.RedisClientOpt{
		Addr: redisAddr,
		DB:   redisDB,
	}
}

type badConnOpt struct {
}

func (b badConnOpt) MakeRedisClient() interface{} {
	return nil
}