// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"context"
	"fmt"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

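// TestProcessorSuccess seeds the default queue, enqueues more tasks while the
// processor is running, and verifies that every task reaches the handler and
// that the in-progress queue is empty once the processor terminates.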
func TestProcessorSuccess(t *testing.T) {
	r := setup(t)
	rdbClient := rdb.NewRDB(r)

	m1 := h.NewTaskMessage("send_email", nil)
	m2 := h.NewTaskMessage("gen_thumbnail", nil)
	m3 := h.NewTaskMessage("reindex", nil)
	m4 := h.NewTaskMessage("sync", nil)

	t1 := NewTask(m1.Type, m1.Payload)
	t2 := NewTask(m2.Type, m2.Payload)
	t3 := NewTask(m3.Type, m3.Payload)
	t4 := NewTask(m4.Type, m4.Payload)

	tests := []struct {
		enqueued      []*base.TaskMessage // initial default queue state
		incoming      []*base.TaskMessage // tasks to be enqueued during run
		wait          time.Duration       // wait duration between starting and stopping processor for this test case
		wantProcessed []*Task             // tasks to be processed at the end
	}{
		{
			enqueued:      []*base.TaskMessage{m1},
			incoming:      []*base.TaskMessage{m2, m3, m4},
			wait:          time.Second,
			wantProcessed: []*Task{t1, t2, t3, t4},
		},
		{
			enqueued:      []*base.TaskMessage{},
			incoming:      []*base.TaskMessage{m1},
			wait:          time.Second,
			wantProcessed: []*Task{t1},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r)                        // clean up db before each test case.
		h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.

		// instantiate a new processor
		var mu sync.Mutex
		var processed []*Task
		handler := func(ctx context.Context, task *Task) error {
			mu.Lock()
			defer mu.Unlock()
			processed = append(processed, task)
			return nil
		}
		ps := base.NewProcessState("localhost", 1234, 10, defaultQueueConfig, false)
		cancelations := base.NewCancelations()
		p := newProcessor(rdbClient, ps, defaultDelayFunc, nil, cancelations, nil)
		p.handler = HandlerFunc(handler)

		var wg sync.WaitGroup
		p.start(&wg)
		for _, msg := range tc.incoming {
			err := rdbClient.Enqueue(msg)
			if err != nil {
				p.terminate()
				t.Fatal(err)
			}
		}
		time.Sleep(tc.wait)
		p.terminate()

		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Payload{})); diff != "" {
			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
		}

		if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
			t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
		}
	}
}

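// TestProcessorRetry runs the processor with a handler that always fails and
// verifies that tasks still under their retry limit land in the retry queue,
// a task that has exhausted its retries is moved to the dead queue, and the
// error handler is invoked once per failed task.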
func TestProcessorRetry(t *testing.T) {
	r := setup(t)
	rdbClient := rdb.NewRDB(r)

	m1 := h.NewTaskMessage("send_email", nil)
	m1.Retried = m1.Retry // m1 has reached its max retry count
	m2 := h.NewTaskMessage("gen_thumbnail", nil)
	m3 := h.NewTaskMessage("reindex", nil)
	m4 := h.NewTaskMessage("sync", nil)

	errMsg := "something went wrong"
	// r* is m* after retry
	r1 := *m1
	r1.ErrorMsg = errMsg
	r2 := *m2
	r2.ErrorMsg = errMsg
	r2.Retried = m2.Retried + 1
	r3 := *m3
	r3.ErrorMsg = errMsg
	r3.Retried = m3.Retried + 1
	r4 := *m4
	r4.ErrorMsg = errMsg
	r4.Retried = m4.Retried + 1

	now := time.Now()

	tests := []struct {
		enqueued     []*base.TaskMessage // initial default queue state
		incoming     []*base.TaskMessage // tasks to be enqueued during run
		delay        time.Duration       // retry delay duration
		handler      Handler             // task handler
		wait         time.Duration       // wait duration between starting and stopping processor for this test case
		wantRetry    []h.ZSetEntry       // tasks in retry queue at the end
		wantDead     []*base.TaskMessage // tasks in dead queue at the end
		wantErrCount int                 // number of times error handler should be called
	}{
		{
			enqueued: []*base.TaskMessage{m1, m2},
			incoming: []*base.TaskMessage{m3, m4},
			delay:    time.Minute,
			handler: HandlerFunc(func(ctx context.Context, task *Task) error {
				return fmt.Errorf(errMsg)
			}),
			wait: time.Second,
			wantRetry: []h.ZSetEntry{
				{Msg: &r2, Score: float64(now.Add(time.Minute).Unix())},
				{Msg: &r3, Score: float64(now.Add(time.Minute).Unix())},
				{Msg: &r4, Score: float64(now.Add(time.Minute).Unix())},
			},
			wantDead:     []*base.TaskMessage{&r1},
			wantErrCount: 4,
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r)                        // clean up db before each test case.
		h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.

		// instantiate a new processor
		delayFunc := func(n int, e error, t *Task) time.Duration {
			return tc.delay
		}
		var (
			mu sync.Mutex // guards n
			n  int        // number of times error handler is called
		)
		errHandler := func(t *Task, err error, retried, maxRetry int) {
			mu.Lock()
			defer mu.Unlock()
			n++
		}
		ps := base.NewProcessState("localhost", 1234, 10, defaultQueueConfig, false)
		cancelations := base.NewCancelations()
		p := newProcessor(rdbClient, ps, delayFunc, nil, cancelations, ErrorHandlerFunc(errHandler))
		p.handler = tc.handler

		var wg sync.WaitGroup
		p.start(&wg)
		for _, msg := range tc.incoming {
			err := rdbClient.Enqueue(msg)
			if err != nil {
				p.terminate()
				t.Fatal(err)
			}
		}
		time.Sleep(tc.wait)
		p.terminate()

		cmpOpt := cmpopts.EquateApprox(0, float64(time.Second)) // allow up to a second difference in zset score
		gotRetry := h.GetRetryEntries(t, r)
		if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
			t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.RetryQueue, diff)
		}

		gotDead := h.GetDeadMessages(t, r)
		if diff := cmp.Diff(tc.wantDead, gotDead, h.SortMsgOpt); diff != "" {
			t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.DeadQueue, diff)
		}

		if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
			t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
		}

		if n != tc.wantErrCount {
			t.Errorf("error handler was called %d times, want %d", n, tc.wantErrCount)
		}
	}
}

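// TestProcessorQueues verifies that the processor derives its list of queue
// names from the queue priority configuration it is given.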
func TestProcessorQueues(t *testing.T) {
	sortOpt := cmp.Transformer("SortStrings", func(in []string) []string {
		out := append([]string(nil), in...) // Copy input to avoid mutating it
		sort.Strings(out)
		return out
	})

	tests := []struct {
		queueCfg map[string]int
		want     []string
	}{
		{
			queueCfg: map[string]int{
				"high":    6,
				"default": 3,
				"low":     1,
			},
			want: []string{"high", "default", "low"},
		},
		{
			queueCfg: map[string]int{
				"default": 1,
			},
			want: []string{"default"},
		},
	}

	for _, tc := range tests {
		cancelations := base.NewCancelations()
		ps := base.NewProcessState("localhost", 1234, 10, tc.queueCfg, false)
		p := newProcessor(nil, ps, defaultDelayFunc, nil, cancelations, nil)
		got := p.queues()
		if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
			t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s",
				tc.queueCfg, got, tc.want, diff)
		}
	}
}

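// TestProcessorWithStrictPriority seeds multiple queues and runs the processor
// in strict-priority mode with a concurrency of one, expecting tasks to be
// processed in queue priority order: critical first, then default, then low.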
func TestProcessorWithStrictPriority(t *testing.T) {
	r := setup(t)
	rdbClient := rdb.NewRDB(r)

	m1 := h.NewTaskMessage("send_email", nil)
	m2 := h.NewTaskMessage("send_email", nil)
	m3 := h.NewTaskMessage("send_email", nil)
	m4 := h.NewTaskMessage("gen_thumbnail", nil)
	m5 := h.NewTaskMessage("gen_thumbnail", nil)
	m6 := h.NewTaskMessage("sync", nil)
	m7 := h.NewTaskMessage("sync", nil)

	t1 := NewTask(m1.Type, m1.Payload)
	t2 := NewTask(m2.Type, m2.Payload)
	t3 := NewTask(m3.Type, m3.Payload)
	t4 := NewTask(m4.Type, m4.Payload)
	t5 := NewTask(m5.Type, m5.Payload)
	t6 := NewTask(m6.Type, m6.Payload)
	t7 := NewTask(m7.Type, m7.Payload)

	tests := []struct {
		enqueued      map[string][]*base.TaskMessage // initial queues state
		wait          time.Duration                  // wait duration between starting and stopping processor for this test case
		wantProcessed []*Task                        // tasks to be processed at the end
	}{
		{
			enqueued: map[string][]*base.TaskMessage{
				base.DefaultQueueName: {m4, m5},
				"critical":            {m1, m2, m3},
				"low":                 {m6, m7},
			},
			wait:          time.Second,
			wantProcessed: []*Task{t1, t2, t3, t4, t5, t6, t7},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r) // clean up db before each test case.
		for qname, msgs := range tc.enqueued {
			h.SeedEnqueuedQueue(t, r, msgs, qname)
		}

		// instantiate a new processor
		var mu sync.Mutex
		var processed []*Task
		handler := func(ctx context.Context, task *Task) error {
			mu.Lock()
			defer mu.Unlock()
			processed = append(processed, task)
			return nil
		}
		queueCfg := map[string]int{
			"critical":            3,
			base.DefaultQueueName: 2,
			"low":                 1,
		}
		// Note: Set concurrency to 1 to make sure tasks are processed one at a time.
		cancelations := base.NewCancelations()
		ps := base.NewProcessState("localhost", 1234, 1 /* concurrency */, queueCfg, true /* strict */)
		p := newProcessor(rdbClient, ps, defaultDelayFunc, nil, cancelations, nil)
		p.handler = HandlerFunc(handler)

		var wg sync.WaitGroup
		p.start(&wg)
		time.Sleep(tc.wait)
		p.terminate()

		if diff := cmp.Diff(tc.wantProcessed, processed, cmp.AllowUnexported(Payload{})); diff != "" {
			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
		}

		if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
			t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
		}
	}
}

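// TestPerform calls the perform helper directly with handlers that succeed,
// return an error, and panic; a panic is expected to be recovered and reported
// as a non-nil error.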
func TestPerform(t *testing.T) {
	tests := []struct {
		desc    string
		handler HandlerFunc
		task    *Task
		wantErr bool
	}{
		{
			desc: "handler returns nil",
			handler: func(ctx context.Context, t *Task) error {
				return nil
			},
			task:    NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
			wantErr: false,
		},
		{
			desc: "handler returns error",
			handler: func(ctx context.Context, t *Task) error {
				return fmt.Errorf("something went wrong")
			},
			task:    NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
			wantErr: true,
		},
		{
			desc: "handler panics",
			handler: func(ctx context.Context, t *Task) error {
				panic("something went terribly wrong")
			},
			task:    NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
			wantErr: true,
		},
	}

	for _, tc := range tests {
		got := perform(context.Background(), tc.task, tc.handler)
		if !tc.wantErr && got != nil {
			t.Errorf("%s: perform() = %v, want nil", tc.desc, got)
			continue
		}
		if tc.wantErr && got == nil {
			t.Errorf("%s: perform() = nil, want non-nil error", tc.desc)
			continue
		}
	}
}