2
0
mirror of https://github.com/hibiken/asynq.git synced 2024-12-25 23:32:17 +08:00
asynq/processor_test.go

365 lines
10 KiB
Go
Raw Normal View History

2020-01-03 10:13:16 +08:00
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
2019-11-28 06:03:04 +08:00
package asynq
import (
"context"
2019-11-28 06:03:04 +08:00
"fmt"
"sort"
2019-11-30 04:48:54 +08:00
"sync"
2019-11-28 06:03:04 +08:00
"testing"
2019-11-30 04:48:54 +08:00
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
h "github.com/hibiken/asynq/internal/asynqtest"
2019-12-22 23:15:45 +08:00
"github.com/hibiken/asynq/internal/base"
2019-12-04 13:01:26 +08:00
"github.com/hibiken/asynq/internal/rdb"
2019-11-28 06:03:04 +08:00
)
2019-11-30 04:48:54 +08:00
// TestProcessorSuccess verifies that the processor drains both pre-seeded
// and newly enqueued tasks, invoking the handler once per task, and leaves
// the in-progress queue empty after termination.
func TestProcessorSuccess(t *testing.T) {
	r := setup(t)
	rdbClient := rdb.NewRDB(r)

	m1 := h.NewTaskMessage("send_email", nil)
	m2 := h.NewTaskMessage("gen_thumbnail", nil)
	m3 := h.NewTaskMessage("reindex", nil)
	m4 := h.NewTaskMessage("sync", nil)

	t1 := NewTask(m1.Type, m1.Payload)
	t2 := NewTask(m2.Type, m2.Payload)
	t3 := NewTask(m3.Type, m3.Payload)
	t4 := NewTask(m4.Type, m4.Payload)

	tests := []struct {
		enqueued      []*base.TaskMessage // initial state of the default queue
		incoming      []*base.TaskMessage // tasks enqueued while the processor is running
		wait          time.Duration       // how long to let the processor run before stopping it
		wantProcessed []*Task             // tasks expected to have been processed at the end
	}{
		{
			enqueued:      []*base.TaskMessage{m1},
			incoming:      []*base.TaskMessage{m2, m3, m4},
			wait:          time.Second,
			wantProcessed: []*Task{t1, t2, t3, t4},
		},
		{
			enqueued:      []*base.TaskMessage{},
			incoming:      []*base.TaskMessage{m1},
			wait:          time.Second,
			wantProcessed: []*Task{t1},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r)                        // start each case from a clean db.
		h.SeedEnqueuedQueue(t, r, tc.enqueued) // seed the default queue.

		// The handler records every task it sees; mu guards the slice
		// because the processor calls the handler from worker goroutines.
		var (
			mu        sync.Mutex
			processed []*Task
		)
		handler := func(ctx context.Context, task *Task) error {
			mu.Lock()
			processed = append(processed, task)
			mu.Unlock()
			return nil
		}
		workerCh := make(chan int)
		go fakeHeartbeater(workerCh)
		ps := base.NewProcessState("localhost", 1234, 10, defaultQueueConfig, false)
		cancelations := base.NewCancelations()
		p := newProcessor(rdbClient, ps, defaultDelayFunc, nil, cancelations)
		p.handler = HandlerFunc(handler)

		var wg sync.WaitGroup
		p.start(&wg)
		for _, msg := range tc.incoming {
			if err := rdbClient.Enqueue(msg); err != nil {
				p.terminate()
				t.Fatal(err)
			}
		}
		time.Sleep(tc.wait)
		p.terminate()
		close(workerCh)

		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Payload{})); diff != "" {
			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
		}

		if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
			t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
		}
	}
}
// TestProcessorRetry verifies that failing tasks are moved to the retry
// queue with the delay produced by the processor's delay function, and
// that a task which has exhausted its retry count goes to the dead queue.
func TestProcessorRetry(t *testing.T) {
	r := setup(t)
	rdbClient := rdb.NewRDB(r)

	m1 := h.NewTaskMessage("send_email", nil)
	m1.Retried = m1.Retry // m1 has reached its max retry count
	m2 := h.NewTaskMessage("gen_thumbnail", nil)
	m3 := h.NewTaskMessage("reindex", nil)
	m4 := h.NewTaskMessage("sync", nil)

	errMsg := "something went wrong"
	// r* is m* after retry: error message recorded, retry count bumped
	// (except r1, which is dead and keeps its count).
	r1 := *m1
	r1.ErrorMsg = errMsg
	r2 := *m2
	r2.ErrorMsg = errMsg
	r2.Retried = m2.Retried + 1
	r3 := *m3
	r3.ErrorMsg = errMsg
	r3.Retried = m3.Retried + 1
	r4 := *m4
	r4.ErrorMsg = errMsg
	r4.Retried = m4.Retried + 1
	now := time.Now()

	tests := []struct {
		enqueued  []*base.TaskMessage // initial default queue state
		incoming  []*base.TaskMessage // tasks to be enqueued during run
		delay     time.Duration       // retry delay duration
		wait      time.Duration       // wait duration between starting and stopping processor for this test case
		wantRetry []h.ZSetEntry       // tasks in retry queue at the end
		wantDead  []*base.TaskMessage // tasks in dead queue at the end
	}{
		{
			enqueued: []*base.TaskMessage{m1, m2},
			incoming: []*base.TaskMessage{m3, m4},
			delay:    time.Minute,
			wait:     time.Second,
			wantRetry: []h.ZSetEntry{
				{Msg: &r2, Score: float64(now.Add(time.Minute).Unix())},
				{Msg: &r3, Score: float64(now.Add(time.Minute).Unix())},
				{Msg: &r4, Score: float64(now.Add(time.Minute).Unix())},
			},
			wantDead: []*base.TaskMessage{&r1},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r)                        // clean up db before each test case.
		h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.

		// delayFunc returns the fixed retry delay for this test case.
		delayFunc := func(n int, e error, t *Task) time.Duration {
			return tc.delay
		}
		// handler always fails so every task is retried (or killed).
		handler := func(ctx context.Context, task *Task) error {
			// errors.New rather than fmt.Errorf(errMsg): errMsg is not a
			// format string, and go vet's printf check flags non-constant
			// format arguments.
			return errors.New(errMsg)
		}
		workerCh := make(chan int)
		go fakeHeartbeater(workerCh)
		ps := base.NewProcessState("localhost", 1234, 10, defaultQueueConfig, false)
		cancelations := base.NewCancelations()
		p := newProcessor(rdbClient, ps, delayFunc, nil, cancelations)
		p.handler = HandlerFunc(handler)

		var wg sync.WaitGroup
		p.start(&wg)
		for _, msg := range tc.incoming {
			err := rdbClient.Enqueue(msg)
			if err != nil {
				p.terminate()
				t.Fatal(err)
			}
		}
		time.Sleep(tc.wait)
		p.terminate()
		close(workerCh)

		cmpOpt := cmpopts.EquateApprox(0, float64(time.Second)) // allow up to a second difference in zset score
		gotRetry := h.GetRetryEntries(t, r)
		if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
			t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.RetryQueue, diff)
		}
		gotDead := h.GetDeadMessages(t, r)
		if diff := cmp.Diff(tc.wantDead, gotDead, h.SortMsgOpt); diff != "" {
			t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.DeadQueue, diff)
		}

		if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
			t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
		}
	}
}
// TestProcessorQueues verifies that the processor reports exactly the queue
// names it was configured with, independent of their priority values.
func TestProcessorQueues(t *testing.T) {
	// Compare string slices irrespective of order; the transformer sorts
	// a copy so the inputs are never mutated.
	sortOpt := cmp.Transformer("SortStrings", func(in []string) []string {
		out := make([]string, len(in))
		copy(out, in)
		sort.Strings(out)
		return out
	})

	tests := []struct {
		queueCfg map[string]int
		want     []string
	}{
		{
			queueCfg: map[string]int{
				"high":    6,
				"default": 3,
				"low":     1,
			},
			want: []string{"high", "default", "low"},
		},
		{
			queueCfg: map[string]int{
				"default": 1,
			},
			want: []string{"default"},
		},
	}

	for _, tc := range tests {
		cancelations := base.NewCancelations()
		ps := base.NewProcessState("localhost", 1234, 10, tc.queueCfg, false)
		p := newProcessor(nil, ps, defaultDelayFunc, nil, cancelations)
		got := p.queues()
		if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
			t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s",
				tc.queueCfg, got, tc.want, diff)
		}
	}
}
2020-01-12 23:46:51 +08:00
// TestProcessorWithStrictPriority verifies that with strict priority enabled
// and a single worker, queues are drained fully in priority order.
func TestProcessorWithStrictPriority(t *testing.T) {
	r := setup(t)
	rdbClient := rdb.NewRDB(r)

	m1 := h.NewTaskMessage("send_email", nil)
	m2 := h.NewTaskMessage("send_email", nil)
	m3 := h.NewTaskMessage("send_email", nil)
	m4 := h.NewTaskMessage("gen_thumbnail", nil)
	m5 := h.NewTaskMessage("gen_thumbnail", nil)
	m6 := h.NewTaskMessage("sync", nil)
	m7 := h.NewTaskMessage("sync", nil)

	t1 := NewTask(m1.Type, m1.Payload)
	t2 := NewTask(m2.Type, m2.Payload)
	t3 := NewTask(m3.Type, m3.Payload)
	t4 := NewTask(m4.Type, m4.Payload)
	t5 := NewTask(m5.Type, m5.Payload)
	t6 := NewTask(m6.Type, m6.Payload)
	t7 := NewTask(m7.Type, m7.Payload)

	tests := []struct {
		enqueued      map[string][]*base.TaskMessage // initial queues state
		wait          time.Duration                  // how long to let the processor run
		wantProcessed []*Task                        // tasks expected, in processing order
	}{
		{
			enqueued: map[string][]*base.TaskMessage{
				base.DefaultQueueName: {m4, m5},
				"critical":            {m1, m2, m3},
				"low":                 {m6, m7},
			},
			wait:          time.Second,
			wantProcessed: []*Task{t1, t2, t3, t4, t5, t6, t7},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r) // clean up db before each test case.
		for qname, msgs := range tc.enqueued {
			h.SeedEnqueuedQueue(t, r, msgs, qname)
		}

		// Record the order in which tasks are handled; mu guards the
		// slice against concurrent handler invocations.
		var (
			mu        sync.Mutex
			processed []*Task
		)
		handler := func(ctx context.Context, task *Task) error {
			mu.Lock()
			processed = append(processed, task)
			mu.Unlock()
			return nil
		}
		queueCfg := map[string]int{
			"critical":            3,
			base.DefaultQueueName: 2,
			"low":                 1,
		}
		// Note: Set concurrency to 1 to make sure tasks are processed one at a time.
		workerCh := make(chan int)
		go fakeHeartbeater(workerCh)
		cancelations := base.NewCancelations()
		ps := base.NewProcessState("localhost", 1234, 1 /* concurrency */, queueCfg, true /*strict*/)
		p := newProcessor(rdbClient, ps, defaultDelayFunc, nil, cancelations)
		p.handler = HandlerFunc(handler)

		var wg sync.WaitGroup
		p.start(&wg)
		time.Sleep(tc.wait)
		p.terminate()
		close(workerCh)

		if diff := cmp.Diff(tc.wantProcessed, processed, cmp.AllowUnexported(Payload{})); diff != "" {
			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
		}
		if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
			t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
		}
	}
}
2019-11-28 06:03:04 +08:00
// TestPerform checks that perform turns each handler outcome — nil return,
// error return, and panic — into the expected error (or nil) for the caller.
func TestPerform(t *testing.T) {
	tests := []struct {
		desc    string
		handler HandlerFunc
		task    *Task
		wantErr bool
	}{
		{
			desc: "handler returns nil",
			handler: func(ctx context.Context, t *Task) error {
				return nil
			},
			task:    NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
			wantErr: false,
		},
		{
			desc: "handler returns error",
			handler: func(ctx context.Context, t *Task) error {
				return fmt.Errorf("something went wrong")
			},
			task:    NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
			wantErr: true,
		},
		{
			desc: "handler panics",
			handler: func(ctx context.Context, t *Task) error {
				panic("something went terribly wrong")
			},
			task:    NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
			wantErr: true,
		},
	}

	for _, tc := range tests {
		err := perform(context.Background(), tc.task, tc.handler)
		if tc.wantErr {
			if err == nil {
				t.Errorf("%s: perform() = nil, want non-nil error", tc.desc)
			}
			continue
		}
		if err != nil {
			t.Errorf("%s: perform() = %v, want nil", tc.desc, err)
		}
	}
}
// fakeHeartbeater drains the worker channel so that sends from the
// processor never block; it returns once the channel is closed.
func fakeHeartbeater(ch <-chan int) {
	for {
		if _, ok := <-ch; !ok {
			return
		}
	}
}