2020-01-03 10:13:16 +08:00
|
|
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
|
|
// Use of this source code is governed by a MIT license
|
|
|
|
// that can be found in the LICENSE file.
|
|
|
|
|
2019-11-30 09:40:31 +08:00
|
|
|
package asynq
|
|
|
|
|
|
|
|
import (
|
2020-03-18 21:49:39 +08:00
|
|
|
"errors"
|
2019-11-30 09:40:31 +08:00
|
|
|
"testing"
|
|
|
|
"time"
|
2019-12-21 23:42:32 +08:00
|
|
|
|
|
|
|
"github.com/google/go-cmp/cmp"
|
2020-03-18 21:49:39 +08:00
|
|
|
"github.com/google/go-cmp/cmp/cmpopts"
|
2019-12-30 01:41:00 +08:00
|
|
|
h "github.com/hibiken/asynq/internal/asynqtest"
|
2019-12-22 23:15:45 +08:00
|
|
|
"github.com/hibiken/asynq/internal/base"
|
2019-11-30 09:40:31 +08:00
|
|
|
)
|
|
|
|
|
2020-02-24 07:40:04 +08:00
|
|
|
// TestClientEnqueueAt verifies that Client.EnqueueAt places a task in the
// queue immediately when the process-at time is now, and in the scheduled
// set (scored by the process-at Unix time) when it is in the future.
func TestClientEnqueueAt(t *testing.T) {
	r := setup(t)
	client := NewClient(RedisClientOpt{
		Addr: redisAddr,
		DB:   redisDB,
	})

	task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})

	var (
		now          = time.Now()
		oneHourLater = now.Add(time.Hour)
	)

	tests := []struct {
		desc          string
		task          *Task
		processAt     time.Time
		opts          []Option
		wantRes       *Result
		wantEnqueued  map[string][]*base.TaskMessage // expected messages per queue
		wantScheduled map[string][]base.Z            // expected scheduled entries per queue
	}{
		{
			desc:      "Process task immediately",
			task:      task,
			processAt: now,
			opts:      []Option{},
			wantRes: &Result{
				Queue:    "default",
				Retry:    defaultMaxRetry,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    defaultMaxRetry,
						Queue:    "default",
						Timeout:  int64(defaultTimeout.Seconds()),
						Deadline: noDeadline.Unix(),
					},
				},
			},
			wantScheduled: map[string][]base.Z{
				"default": {},
			},
		},
		{
			desc:      "Schedule task to be processed in the future",
			task:      task,
			processAt: oneHourLater,
			opts:      []Option{},
			wantRes: &Result{
				Queue:    "default",
				Retry:    defaultMaxRetry,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {},
			},
			wantScheduled: map[string][]base.Z{
				"default": {
					{
						Message: &base.TaskMessage{
							Type:     task.Type,
							Payload:  task.Payload.data,
							Retry:    defaultMaxRetry,
							Queue:    "default",
							Timeout:  int64(defaultTimeout.Seconds()),
							Deadline: noDeadline.Unix(),
						},
						// Scheduled entries are scored by their process-at time.
						Score: oneHourLater.Unix(),
					},
				},
			},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r) // clean up db before each test case.

		gotRes, err := client.EnqueueAt(tc.processAt, tc.task, tc.opts...)
		if err != nil {
			t.Error(err)
			continue
		}
		// Result.ID is randomly generated, so it is excluded from the diff.
		if diff := cmp.Diff(tc.wantRes, gotRes, cmpopts.IgnoreFields(Result{}, "ID")); diff != "" {
			t.Errorf("%s;\nEnqueueAt(processAt, task) returned %v, want %v; (-want,+got)\n%s",
				tc.desc, gotRes, tc.wantRes, diff)
		}

		for qname, want := range tc.wantEnqueued {
			gotEnqueued := h.GetEnqueuedMessages(t, r, qname)
			if diff := cmp.Diff(want, gotEnqueued, h.IgnoreIDOpt); diff != "" {
				t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
			}
		}
		for qname, want := range tc.wantScheduled {
			gotScheduled := h.GetScheduledEntries(t, r, qname)
			if diff := cmp.Diff(want, gotScheduled, h.IgnoreIDOpt); diff != "" {
				t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.ScheduledKey(qname), diff)
			}
		}
	}
}
|
|
|
|
|
|
|
|
// TestClientEnqueue verifies that Client.Enqueue writes the task message to
// the expected queue and that per-call options (MaxRetry, Queue, Timeout,
// Deadline) are applied with their documented semantics: negative retry
// clamps to zero, the last conflicting option wins, and queue names are
// lowercased.
func TestClientEnqueue(t *testing.T) {
	r := setup(t)
	client := NewClient(RedisClientOpt{
		Addr: redisAddr,
		DB:   redisDB,
	})

	task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})

	tests := []struct {
		desc         string
		task         *Task
		opts         []Option
		wantRes      *Result
		wantEnqueued map[string][]*base.TaskMessage // expected messages per queue
	}{
		{
			desc: "Process task immediately with a custom retry count",
			task: task,
			opts: []Option{
				MaxRetry(3),
			},
			wantRes: &Result{
				Queue:    "default",
				Retry:    3,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    3,
						Queue:    "default",
						Timeout:  int64(defaultTimeout.Seconds()),
						Deadline: noDeadline.Unix(),
					},
				},
			},
		},
		{
			desc: "Negative retry count",
			task: task,
			opts: []Option{
				MaxRetry(-2),
			},
			wantRes: &Result{
				Queue:    "default",
				Retry:    0,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    0, // Retry count should be set to zero
						Queue:    "default",
						Timeout:  int64(defaultTimeout.Seconds()),
						Deadline: noDeadline.Unix(),
					},
				},
			},
		},
		{
			desc: "Conflicting options",
			task: task,
			opts: []Option{
				MaxRetry(2),
				MaxRetry(10),
			},
			wantRes: &Result{
				Queue:    "default",
				Retry:    10,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    10, // Last option takes precedence
						Queue:    "default",
						Timeout:  int64(defaultTimeout.Seconds()),
						Deadline: noDeadline.Unix(),
					},
				},
			},
		},
		{
			desc: "With queue option",
			task: task,
			opts: []Option{
				Queue("custom"),
			},
			wantRes: &Result{
				Queue:    "custom",
				Retry:    defaultMaxRetry,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"custom": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    defaultMaxRetry,
						Queue:    "custom",
						Timeout:  int64(defaultTimeout.Seconds()),
						Deadline: noDeadline.Unix(),
					},
				},
			},
		},
		{
			desc: "Queue option should be case-insensitive",
			task: task,
			opts: []Option{
				Queue("HIGH"),
			},
			wantRes: &Result{
				Queue:    "high",
				Retry:    defaultMaxRetry,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"high": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    defaultMaxRetry,
						Queue:    "high",
						Timeout:  int64(defaultTimeout.Seconds()),
						Deadline: noDeadline.Unix(),
					},
				},
			},
		},
		{
			desc: "With timeout option",
			task: task,
			opts: []Option{
				Timeout(20 * time.Second),
			},
			wantRes: &Result{
				Queue:    "default",
				Retry:    defaultMaxRetry,
				Timeout:  20 * time.Second,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    defaultMaxRetry,
						Queue:    "default",
						Timeout:  20, // timeout is stored as whole seconds
						Deadline: noDeadline.Unix(),
					},
				},
			},
		},
		{
			desc: "With deadline option",
			task: task,
			opts: []Option{
				Deadline(time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC)),
			},
			wantRes: &Result{
				Queue:    "default",
				Retry:    defaultMaxRetry,
				Timeout:  noTimeout,
				Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC),
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    defaultMaxRetry,
						Queue:    "default",
						Timeout:  int64(noTimeout.Seconds()),
						Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC).Unix(),
					},
				},
			},
		},
		{
			desc: "With both deadline and timeout options",
			task: task,
			opts: []Option{
				Timeout(20 * time.Second),
				Deadline(time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC)),
			},
			wantRes: &Result{
				Queue:    "default",
				Retry:    defaultMaxRetry,
				Timeout:  20 * time.Second,
				Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC),
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    defaultMaxRetry,
						Queue:    "default",
						Timeout:  20,
						Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC).Unix(),
					},
				},
			},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r) // clean up db before each test case.

		gotRes, err := client.Enqueue(tc.task, tc.opts...)
		if err != nil {
			t.Error(err)
			continue
		}
		// Result.ID is randomly generated, so it is excluded from the diff.
		if diff := cmp.Diff(tc.wantRes, gotRes, cmpopts.IgnoreFields(Result{}, "ID")); diff != "" {
			t.Errorf("%s;\nEnqueue(task) returned %v, want %v; (-want,+got)\n%s",
				tc.desc, gotRes, tc.wantRes, diff)
		}

		for qname, want := range tc.wantEnqueued {
			got := h.GetEnqueuedMessages(t, r, qname)
			if diff := cmp.Diff(want, got, h.IgnoreIDOpt); diff != "" {
				t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
			}
		}
	}
}
|
|
|
|
|
|
|
|
// TestClientEnqueueIn verifies that Client.EnqueueIn schedules a task with a
// positive delay into the scheduled set, and enqueues it immediately when
// the delay is zero.
func TestClientEnqueueIn(t *testing.T) {
	r := setup(t)
	client := NewClient(RedisClientOpt{
		Addr: redisAddr,
		DB:   redisDB,
	})

	task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})

	tests := []struct {
		desc          string
		task          *Task
		delay         time.Duration
		opts          []Option
		wantRes       *Result
		wantEnqueued  map[string][]*base.TaskMessage // expected messages per queue
		wantScheduled map[string][]base.Z            // expected scheduled entries per queue
	}{
		{
			desc:  "schedule a task to be enqueued in one hour",
			task:  task,
			delay: time.Hour,
			opts:  []Option{},
			wantRes: &Result{
				Queue:    "default",
				Retry:    defaultMaxRetry,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {},
			},
			wantScheduled: map[string][]base.Z{
				"default": {
					{
						Message: &base.TaskMessage{
							Type:     task.Type,
							Payload:  task.Payload.data,
							Retry:    defaultMaxRetry,
							Queue:    "default",
							Timeout:  int64(defaultTimeout.Seconds()),
							Deadline: noDeadline.Unix(),
						},
						// Score is evaluated at table-construction time; the
						// h.IgnoreIDOpt comparison tolerance relies on the test
						// running promptly after this point.
						Score: time.Now().Add(time.Hour).Unix(),
					},
				},
			},
		},
		{
			desc:  "Zero delay",
			task:  task,
			delay: 0,
			opts:  []Option{},
			wantRes: &Result{
				Queue:    "default",
				Retry:    defaultMaxRetry,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			wantEnqueued: map[string][]*base.TaskMessage{
				"default": {
					{
						Type:     task.Type,
						Payload:  task.Payload.data,
						Retry:    defaultMaxRetry,
						Queue:    "default",
						Timeout:  int64(defaultTimeout.Seconds()),
						Deadline: noDeadline.Unix(),
					},
				},
			},
			wantScheduled: map[string][]base.Z{
				"default": {},
			},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r) // clean up db before each test case.

		gotRes, err := client.EnqueueIn(tc.delay, tc.task, tc.opts...)
		if err != nil {
			t.Error(err)
			continue
		}
		// Result.ID is randomly generated, so it is excluded from the diff.
		if diff := cmp.Diff(tc.wantRes, gotRes, cmpopts.IgnoreFields(Result{}, "ID")); diff != "" {
			t.Errorf("%s;\nEnqueueIn(delay, task) returned %v, want %v; (-want,+got)\n%s",
				tc.desc, gotRes, tc.wantRes, diff)
		}

		for qname, want := range tc.wantEnqueued {
			gotEnqueued := h.GetEnqueuedMessages(t, r, qname)
			if diff := cmp.Diff(want, gotEnqueued, h.IgnoreIDOpt); diff != "" {
				t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
			}
		}
		for qname, want := range tc.wantScheduled {
			gotScheduled := h.GetScheduledEntries(t, r, qname)
			if diff := cmp.Diff(want, gotScheduled, h.IgnoreIDOpt); diff != "" {
				t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.ScheduledKey(qname), diff)
			}
		}
	}
}
|
2020-03-18 21:49:39 +08:00
|
|
|
|
2020-04-26 22:48:38 +08:00
|
|
|
// TestClientDefaultOptions verifies that options registered with
// Client.SetDefaultOptions for a task type are applied at enqueue time, and
// that options passed directly to Enqueue override the registered defaults.
func TestClientDefaultOptions(t *testing.T) {
	r := setup(t)

	tests := []struct {
		desc        string
		defaultOpts []Option // options set at the client level.
		opts        []Option // options used at enqueue time.
		task        *Task
		wantRes     *Result
		queue       string // queue that the message should go into.
		want        *base.TaskMessage
	}{
		{
			desc:        "With queue routing option",
			defaultOpts: []Option{Queue("feed")},
			opts:        []Option{},
			task:        NewTask("feed:import", nil),
			wantRes: &Result{
				Queue:    "feed",
				Retry:    defaultMaxRetry,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			queue: "feed",
			want: &base.TaskMessage{
				Type:     "feed:import",
				Payload:  nil,
				Retry:    defaultMaxRetry,
				Queue:    "feed",
				Timeout:  int64(defaultTimeout.Seconds()),
				Deadline: noDeadline.Unix(),
			},
		},
		{
			desc:        "With multiple options",
			defaultOpts: []Option{Queue("feed"), MaxRetry(5)},
			opts:        []Option{},
			task:        NewTask("feed:import", nil),
			wantRes: &Result{
				Queue:    "feed",
				Retry:    5,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			queue: "feed",
			want: &base.TaskMessage{
				Type:     "feed:import",
				Payload:  nil,
				Retry:    5,
				Queue:    "feed",
				Timeout:  int64(defaultTimeout.Seconds()),
				Deadline: noDeadline.Unix(),
			},
		},
		{
			desc:        "With overriding options at enqueue time",
			defaultOpts: []Option{Queue("feed"), MaxRetry(5)},
			opts:        []Option{Queue("critical")},
			task:        NewTask("feed:import", nil),
			wantRes: &Result{
				Queue:    "critical",
				Retry:    5,
				Timeout:  defaultTimeout,
				Deadline: noDeadline,
			},
			queue: "critical",
			want: &base.TaskMessage{
				Type:     "feed:import",
				Payload:  nil,
				Retry:    5,
				Queue:    "critical",
				Timeout:  int64(defaultTimeout.Seconds()),
				Deadline: noDeadline.Unix(),
			},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r)
		// A fresh client per case so that SetDefaultOptions registrations
		// from earlier cases do not leak into later ones.
		c := NewClient(RedisClientOpt{Addr: redisAddr, DB: redisDB})
		c.SetDefaultOptions(tc.task.Type, tc.defaultOpts...)
		gotRes, err := c.Enqueue(tc.task, tc.opts...)
		if err != nil {
			t.Fatal(err)
		}
		// Result.ID is randomly generated, so it is excluded from the diff.
		if diff := cmp.Diff(tc.wantRes, gotRes, cmpopts.IgnoreFields(Result{}, "ID")); diff != "" {
			t.Errorf("%s;\nEnqueue(task, opts...) returned %v, want %v; (-want,+got)\n%s",
				tc.desc, gotRes, tc.wantRes, diff)
		}
		enqueued := h.GetEnqueuedMessages(t, r, tc.queue)
		if len(enqueued) != 1 {
			t.Errorf("%s;\nexpected queue %q to have one message; got %d messages in the queue.",
				tc.desc, tc.queue, len(enqueued))
			continue
		}
		got := enqueued[0]
		if diff := cmp.Diff(tc.want, got, h.IgnoreIDOpt); diff != "" {
			t.Errorf("%s;\nmismatch found in enqueued task message; (-want,+got)\n%s",
				tc.desc, diff)
		}
	}
}
|
|
|
|
|
2020-03-18 21:49:39 +08:00
|
|
|
func TestEnqueueUnique(t *testing.T) {
|
|
|
|
r := setup(t)
|
|
|
|
c := NewClient(RedisClientOpt{
|
|
|
|
Addr: redisAddr,
|
|
|
|
DB: redisDB,
|
|
|
|
})
|
|
|
|
|
|
|
|
tests := []struct {
|
|
|
|
task *Task
|
|
|
|
ttl time.Duration
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
NewTask("email", map[string]interface{}{"user_id": 123}),
|
|
|
|
time.Hour,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range tests {
|
|
|
|
h.FlushDB(t, r) // clean up db before each test case.
|
|
|
|
|
|
|
|
// Enqueue the task first. It should succeed.
|
2020-07-03 20:49:52 +08:00
|
|
|
_, err := c.Enqueue(tc.task, Unique(tc.ttl))
|
2020-03-18 21:49:39 +08:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2020-08-18 21:01:38 +08:00
|
|
|
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type, tc.task.Payload.data)).Val()
|
2020-03-18 21:49:39 +08:00
|
|
|
if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
|
|
|
t.Errorf("TTL = %v, want %v", gotTTL, tc.ttl)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Enqueue the task again. It should fail.
|
2020-07-03 20:49:52 +08:00
|
|
|
_, err = c.Enqueue(tc.task, Unique(tc.ttl))
|
2020-03-18 21:49:39 +08:00
|
|
|
if err == nil {
|
|
|
|
t.Errorf("Enqueueing %+v did not return an error", tc.task)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if !errors.Is(err, ErrDuplicateTask) {
|
|
|
|
t.Errorf("Enqueueing %+v returned an error that is not ErrDuplicateTask", tc.task)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestEnqueueInUnique(t *testing.T) {
|
|
|
|
r := setup(t)
|
|
|
|
c := NewClient(RedisClientOpt{
|
|
|
|
Addr: redisAddr,
|
|
|
|
DB: redisDB,
|
|
|
|
})
|
|
|
|
|
|
|
|
tests := []struct {
|
|
|
|
task *Task
|
|
|
|
d time.Duration
|
|
|
|
ttl time.Duration
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
NewTask("reindex", nil),
|
|
|
|
time.Hour,
|
|
|
|
10 * time.Minute,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range tests {
|
|
|
|
h.FlushDB(t, r) // clean up db before each test case.
|
|
|
|
|
|
|
|
// Enqueue the task first. It should succeed.
|
2020-07-03 20:49:52 +08:00
|
|
|
_, err := c.EnqueueIn(tc.d, tc.task, Unique(tc.ttl))
|
2020-03-18 21:49:39 +08:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2020-08-18 21:01:38 +08:00
|
|
|
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type, tc.task.Payload.data)).Val()
|
2020-03-18 21:49:39 +08:00
|
|
|
wantTTL := time.Duration(tc.ttl.Seconds()+tc.d.Seconds()) * time.Second
|
|
|
|
if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
|
|
|
t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Enqueue the task again. It should fail.
|
2020-07-03 20:49:52 +08:00
|
|
|
_, err = c.EnqueueIn(tc.d, tc.task, Unique(tc.ttl))
|
2020-03-18 21:49:39 +08:00
|
|
|
if err == nil {
|
|
|
|
t.Errorf("Enqueueing %+v did not return an error", tc.task)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if !errors.Is(err, ErrDuplicateTask) {
|
|
|
|
t.Errorf("Enqueueing %+v returned an error that is not ErrDuplicateTask", tc.task)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestEnqueueAtUnique(t *testing.T) {
|
|
|
|
r := setup(t)
|
|
|
|
c := NewClient(RedisClientOpt{
|
|
|
|
Addr: redisAddr,
|
|
|
|
DB: redisDB,
|
|
|
|
})
|
|
|
|
|
|
|
|
tests := []struct {
|
|
|
|
task *Task
|
|
|
|
at time.Time
|
|
|
|
ttl time.Duration
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
NewTask("reindex", nil),
|
|
|
|
time.Now().Add(time.Hour),
|
|
|
|
10 * time.Minute,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range tests {
|
|
|
|
h.FlushDB(t, r) // clean up db before each test case.
|
|
|
|
|
|
|
|
// Enqueue the task first. It should succeed.
|
2020-07-03 20:49:52 +08:00
|
|
|
_, err := c.EnqueueAt(tc.at, tc.task, Unique(tc.ttl))
|
2020-03-18 21:49:39 +08:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2020-08-18 21:01:38 +08:00
|
|
|
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type, tc.task.Payload.data)).Val()
|
2020-03-18 21:49:39 +08:00
|
|
|
wantTTL := tc.at.Add(tc.ttl).Sub(time.Now())
|
|
|
|
if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
|
|
|
t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Enqueue the task again. It should fail.
|
2020-07-03 20:49:52 +08:00
|
|
|
_, err = c.EnqueueAt(tc.at, tc.task, Unique(tc.ttl))
|
2020-03-18 21:49:39 +08:00
|
|
|
if err == nil {
|
|
|
|
t.Errorf("Enqueueing %+v did not return an error", tc.task)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if !errors.Is(err, ErrDuplicateTask) {
|
|
|
|
t.Errorf("Enqueueing %+v returned an error that is not ErrDuplicateTask", tc.task)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|