
Use asynqtest helpers in asynq package tests

Ken Hibino 2019-12-29 09:41:00 -08:00
parent 807729d36a
commit ae0c2f9ca5
5 changed files with 72 additions and 212 deletions
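
As a reading aid, here is a minimal sketch of the pattern the rewritten tests follow: seed Redis through the asynqtest helpers, exercise the code under test, then read queue state back with the helper getters and compare via go-cmp. Everything below is taken from the diff itself except the test name and the seeded message, which are hypothetical.

    package asynq

    import (
        "testing"

        "github.com/google/go-cmp/cmp"
        h "github.com/hibiken/asynq/internal/asynqtest"
        "github.com/hibiken/asynq/internal/base"
    )

    // TestHelperPattern is a hypothetical example, not part of the commit.
    func TestHelperPattern(t *testing.T) {
        r := setup(t) // setup now flushes the db via h.FlushDB
        msg := h.NewTaskMessage("send_email", nil)

        h.SeedDefaultQueue(t, r, []*base.TaskMessage{msg}) // initial default queue state

        // ... run the code under test here ...

        got := h.GetEnqueuedMessages(t, r)
        want := []*base.TaskMessage{msg}
        if diff := cmp.Diff(want, got, h.SortMsgOpt, h.IgnoreIDOpt); diff != "" {
            t.Errorf("mismatch found in %q; (-want,+got)\n%s", base.DefaultQueue, diff)
        }
    }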

Changed file 1 of 5

@@ -1,32 +1,17 @@
 package asynq

 import (
-    "encoding/json"
-    "math/rand"
     "sort"
     "testing"
-    "time"

     "github.com/go-redis/redis/v7"
     "github.com/google/go-cmp/cmp"
-    "github.com/google/go-cmp/cmp/cmpopts"
-    "github.com/hibiken/asynq/internal/base"
-    "github.com/rs/xid"
+    h "github.com/hibiken/asynq/internal/asynqtest"
 )

 // This file defines test helper functions used by
 // other test files.

-func init() {
-    rand.Seed(time.Now().UnixNano())
-}
-
-// scheduledEntry represents an item in redis sorted set (aka ZSET).
-type sortedSetEntry struct {
-    msg   *base.TaskMessage
-    score int64
-}
-
 func setup(t *testing.T) *redis.Client {
     t.Helper()
     r := redis.NewClient(&redis.Options{
@@ -34,9 +19,7 @@ func setup(t *testing.T) *redis.Client {
         DB: 14,
     })
     // Start each test with a clean slate.
-    if err := r.FlushDB().Err(); err != nil {
-        panic(err)
-    }
+    h.FlushDB(t, r)
     return r
 }

@@ -47,68 +30,3 @@ var sortTaskOpt = cmp.Transformer("SortMsg", func(in []*Task) []*Task {
     })
     return out
 })
-
-var sortMsgOpt = cmp.Transformer("SortMsg", func(in []*base.TaskMessage) []*base.TaskMessage {
-    out := append([]*base.TaskMessage(nil), in...) // Copy input to avoid mutating it
-    sort.Slice(out, func(i, j int) bool {
-        return out[i].ID.String() < out[j].ID.String()
-    })
-    return out
-})
-
-var sortZSetEntryOpt = cmp.Transformer("SortZSetEntry", func(in []sortedSetEntry) []sortedSetEntry {
-    out := append([]sortedSetEntry(nil), in...) // Copy input to avoid mutating it
-    sort.Slice(out, func(i, j int) bool {
-        return out[i].msg.ID.String() < out[j].msg.ID.String()
-    })
-    return out
-})
-
-var ignoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID")
-
-func randomTask(taskType, qname string, payload map[string]interface{}) *base.TaskMessage {
-    return &base.TaskMessage{
-        ID:      xid.New(),
-        Type:    taskType,
-        Queue:   qname,
-        Retry:   defaultMaxRetry,
-        Payload: make(map[string]interface{}),
-    }
-}
-
-func mustMarshal(t *testing.T, task *base.TaskMessage) string {
-    t.Helper()
-    data, err := json.Marshal(task)
-    if err != nil {
-        t.Fatal(err)
-    }
-    return string(data)
-}
-
-func mustUnmarshal(t *testing.T, data string) *base.TaskMessage {
-    t.Helper()
-    var task base.TaskMessage
-    err := json.Unmarshal([]byte(data), &task)
-    if err != nil {
-        t.Fatal(err)
-    }
-    return &task
-}
-
-func mustMarshalSlice(t *testing.T, tasks []*base.TaskMessage) []string {
-    t.Helper()
-    var data []string
-    for _, task := range tasks {
-        data = append(data, mustMarshal(t, task))
-    }
-    return data
-}
-
-func mustUnmarshalSlice(t *testing.T, data []string) []*base.TaskMessage {
-    t.Helper()
-    var tasks []*base.TaskMessage
-    for _, s := range data {
-        tasks = append(tasks, mustUnmarshal(t, s))
-    }
-    return tasks
-}

Changed file 2 of 5

@@ -5,6 +5,7 @@ import (
     "time"

     "github.com/google/go-cmp/cmp"
+    h "github.com/hibiken/asynq/internal/asynqtest"
     "github.com/hibiken/asynq/internal/base"
     "github.com/hibiken/asynq/internal/rdb"
 )
@@ -21,7 +22,7 @@ func TestClient(t *testing.T) {
         processAt     time.Time
         opts          []Option
         wantEnqueued  []*base.TaskMessage
-        wantScheduled []sortedSetEntry
+        wantScheduled []h.ZSetEntry
     }{
         {
             desc: "Process task immediately",
@@ -44,15 +45,15 @@ func TestClient(t *testing.T) {
             processAt:    time.Now().Add(2 * time.Hour),
             opts:         []Option{},
             wantEnqueued: nil, // db is flushed in setup so list does not exist hence nil
-            wantScheduled: []sortedSetEntry{
+            wantScheduled: []h.ZSetEntry{
                 {
-                    msg: &base.TaskMessage{
+                    Msg: &base.TaskMessage{
                         Type:    task.Type,
                         Payload: task.Payload,
                         Retry:   defaultMaxRetry,
                         Queue:   "default",
                     },
-                    score: time.Now().Add(2 * time.Hour).Unix(),
+                    Score: time.Now().Add(2 * time.Hour).Unix(),
                 },
             },
         },
@@ -111,10 +112,7 @@ func TestClient(t *testing.T) {
     }

     for _, tc := range tests {
-        // clean up db before each test case.
-        if err := r.FlushDB().Err(); err != nil {
-            t.Fatal(err)
-        }
+        h.FlushDB(t, r) // clean up db before each test case.

         err := client.Process(tc.task, tc.processAt, tc.opts...)
         if err != nil {
@@ -122,23 +120,13 @@ func TestClient(t *testing.T) {
             continue
         }

-        gotEnqueuedRaw := r.LRange(base.DefaultQueue, 0, -1).Val()
-        gotEnqueued := mustUnmarshalSlice(t, gotEnqueuedRaw)
-        if diff := cmp.Diff(tc.wantEnqueued, gotEnqueued, ignoreIDOpt); diff != "" {
+        gotEnqueued := h.GetEnqueuedMessages(t, r)
+        if diff := cmp.Diff(tc.wantEnqueued, gotEnqueued, h.IgnoreIDOpt); diff != "" {
             t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.DefaultQueue, diff)
         }

-        gotScheduledRaw := r.ZRangeWithScores(base.ScheduledQueue, 0, -1).Val()
-        var gotScheduled []sortedSetEntry
-        for _, z := range gotScheduledRaw {
-            gotScheduled = append(gotScheduled, sortedSetEntry{
-                msg:   mustUnmarshal(t, z.Member.(string)),
-                score: int64(z.Score),
-            })
-        }
-
-        cmpOpt := cmp.AllowUnexported(sortedSetEntry{})
-        if diff := cmp.Diff(tc.wantScheduled, gotScheduled, cmpOpt, ignoreIDOpt); diff != "" {
+        gotScheduled := h.GetScheduledEntries(t, r)
+        if diff := cmp.Diff(tc.wantScheduled, gotScheduled, h.IgnoreIDOpt); diff != "" {
             t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.ScheduledQueue, diff)
         }
     }

Changed file 3 of 5

@@ -8,6 +8,7 @@ import (
     "github.com/go-redis/redis/v7"
     "github.com/google/go-cmp/cmp"
+    "github.com/google/go-cmp/cmp/cmpopts"
     "github.com/hibiken/asynq/internal/base"
     "github.com/rs/xid"
 )
@@ -36,6 +37,9 @@ var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []ZSetEntry) []ZSetEntry {
     return out
 })

+// IgnoreIDOpt is an cmp.Option to ignore ID field in task messages when comparing.
+var IgnoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID")
+
 // NewTaskMessage returns a new instance of TaskMessage given a task type and payload.
 func NewTaskMessage(taskType string, payload map[string]interface{}) *base.TaskMessage {
     return &base.TaskMessage{
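
For reference, a brief hypothetical snippet showing how the exported options compose in a comparison, written from the point of view of a test in package asynq that imports the helpers as h (want and got stand for any []*base.TaskMessage values):

    // SortMsgOpt normalizes slice order; IgnoreIDOpt skips the randomly generated ID field.
    if diff := cmp.Diff(want, got, h.SortMsgOpt, h.IgnoreIDOpt); diff != "" {
        t.Errorf("task messages mismatch; (-want,+got)\n%s", diff)
    }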

Changed file 4 of 5

@@ -7,6 +7,7 @@ import (
     "time"

     "github.com/google/go-cmp/cmp"
+    h "github.com/hibiken/asynq/internal/asynqtest"
     "github.com/hibiken/asynq/internal/base"
     "github.com/hibiken/asynq/internal/rdb"
 )
@@ -15,10 +16,10 @@ func TestProcessorSuccess(t *testing.T) {
     r := setup(t)
     rdbClient := rdb.NewRDB(r)

-    m1 := randomTask("send_email", "default", nil)
-    m2 := randomTask("gen_thumbnail", "default", nil)
-    m3 := randomTask("reindex", "default", nil)
-    m4 := randomTask("sync", "default", nil)
+    m1 := h.NewTaskMessage("send_email", nil)
+    m2 := h.NewTaskMessage("gen_thumbnail", nil)
+    m3 := h.NewTaskMessage("reindex", nil)
+    m4 := h.NewTaskMessage("sync", nil)

     t1 := &Task{Type: m1.Type, Payload: m1.Payload}
     t2 := &Task{Type: m2.Type, Payload: m2.Payload}
@@ -26,19 +27,19 @@ func TestProcessorSuccess(t *testing.T) {
     t4 := &Task{Type: m4.Type, Payload: m4.Payload}

     tests := []struct {
-        initQueue     []*base.TaskMessage // initial default queue state
+        enqueued      []*base.TaskMessage // initial default queue state
         incoming      []*base.TaskMessage // tasks to be enqueued during run
         wait          time.Duration       // wait duration between starting and stopping processor for this test case
         wantProcessed []*Task             // tasks to be processed at the end
     }{
         {
-            initQueue:     []*base.TaskMessage{m1},
+            enqueued:      []*base.TaskMessage{m1},
             incoming:      []*base.TaskMessage{m2, m3, m4},
             wait:          time.Second,
             wantProcessed: []*Task{t1, t2, t3, t4},
         },
         {
-            initQueue:     []*base.TaskMessage{},
+            enqueued:      []*base.TaskMessage{},
             incoming:      []*base.TaskMessage{m1},
             wait:          time.Second,
             wantProcessed: []*Task{t1},
@@ -46,32 +47,22 @@ func TestProcessorSuccess(t *testing.T) {
     }

     for _, tc := range tests {
-        // clean up db before each test case.
-        if err := r.FlushDB().Err(); err != nil {
-            t.Fatal(err)
-        }
+        h.FlushDB(t, r)                       // clean up db before each test case.
+        h.SeedDefaultQueue(t, r, tc.enqueued) // initialize default queue.

         // instantiate a new processor
         var mu sync.Mutex
         var processed []*Task
-        var h HandlerFunc
-        h = func(task *Task) error {
+        handler := func(task *Task) error {
             mu.Lock()
             defer mu.Unlock()
             processed = append(processed, task)
             return nil
         }
-        p := newProcessor(rdbClient, 10, h)
+        p := newProcessor(rdbClient, 10, HandlerFunc(handler))
         p.dequeueTimeout = time.Second // short time out for test purpose
-        // initialize default queue.
-        for _, msg := range tc.initQueue {
-            err := rdbClient.Enqueue(msg)
-            if err != nil {
-                t.Fatal(err)
-            }
-        }

         p.start()
         for _, msg := range tc.incoming {
             err := rdbClient.Enqueue(msg)
             if err != nil {
@@ -96,11 +87,11 @@ func TestProcessorRetry(t *testing.T) {
     r := setup(t)
     rdbClient := rdb.NewRDB(r)

-    m1 := randomTask("send_email", "default", nil)
+    m1 := h.NewTaskMessage("send_email", nil)
     m1.Retried = m1.Retry // m1 has reached its max retry count
-    m2 := randomTask("gen_thumbnail", "default", nil)
-    m3 := randomTask("reindex", "default", nil)
-    m4 := randomTask("sync", "default", nil)
+    m2 := h.NewTaskMessage("gen_thumbnail", nil)
+    m3 := h.NewTaskMessage("reindex", nil)
+    m4 := h.NewTaskMessage("sync", nil)

     errMsg := "something went wrong"
     // r* is m* after retry
@@ -117,14 +108,14 @@ func TestProcessorRetry(t *testing.T) {
     r4.Retried = m4.Retried + 1

     tests := []struct {
-        initQueue []*base.TaskMessage // initial default queue state
+        enqueued  []*base.TaskMessage // initial default queue state
         incoming  []*base.TaskMessage // tasks to be enqueued during run
         wait      time.Duration       // wait duration between starting and stopping processor for this test case
         wantRetry []*base.TaskMessage // tasks in retry queue at the end
         wantDead  []*base.TaskMessage // tasks in dead queue at the end
     }{
         {
-            initQueue: []*base.TaskMessage{m1, m2},
+            enqueued:  []*base.TaskMessage{m1, m2},
             incoming:  []*base.TaskMessage{m3, m4},
             wait:      time.Second,
             wantRetry: []*base.TaskMessage{&r2, &r3, &r4},
@@ -133,24 +124,15 @@ func TestProcessorRetry(t *testing.T) {
     }

     for _, tc := range tests {
-        // clean up db before each test case.
-        if err := r.FlushDB().Err(); err != nil {
-            t.Fatal(err)
-        }
+        h.FlushDB(t, r)                       // clean up db before each test case.
+        h.SeedDefaultQueue(t, r, tc.enqueued) // initialize default queue.

         // instantiate a new processor
-        var h HandlerFunc
-        h = func(task *Task) error {
+        handler := func(task *Task) error {
             return fmt.Errorf(errMsg)
         }
-        p := newProcessor(rdbClient, 10, h)
+        p := newProcessor(rdbClient, 10, HandlerFunc(handler))
         p.dequeueTimeout = time.Second // short time out for test purpose
-        // initialize default queue.
-        for _, msg := range tc.initQueue {
-            err := rdbClient.Enqueue(msg)
-            if err != nil {
-                t.Fatal(err)
-            }
-        }

         p.start()
         for _, msg := range tc.incoming {
@@ -163,15 +145,13 @@ func TestProcessorRetry(t *testing.T) {
         time.Sleep(tc.wait)
         p.terminate()

-        gotRetryRaw := r.ZRange(base.RetryQueue, 0, -1).Val()
-        gotRetry := mustUnmarshalSlice(t, gotRetryRaw)
-        if diff := cmp.Diff(tc.wantRetry, gotRetry, sortMsgOpt); diff != "" {
+        gotRetry := h.GetRetryMessages(t, r)
+        if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortMsgOpt); diff != "" {
             t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.RetryQueue, diff)
         }

-        gotDeadRaw := r.ZRange(base.DeadQueue, 0, -1).Val()
-        gotDead := mustUnmarshalSlice(t, gotDeadRaw)
-        if diff := cmp.Diff(tc.wantDead, gotDead, sortMsgOpt); diff != "" {
+        gotDead := h.GetDeadMessages(t, r)
+        if diff := cmp.Diff(tc.wantDead, gotDead, h.SortMsgOpt); diff != "" {
             t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.DeadQueue, diff)
         }

Changed file 5 of 5

@@ -4,29 +4,26 @@ import (
     "testing"
     "time"

-    "github.com/go-redis/redis/v7"
     "github.com/google/go-cmp/cmp"
+    h "github.com/hibiken/asynq/internal/asynqtest"
     "github.com/hibiken/asynq/internal/base"
     "github.com/hibiken/asynq/internal/rdb"
 )

 func TestScheduler(t *testing.T) {
-    type scheduledTask struct {
-        msg       *base.TaskMessage
-        processAt time.Time
-    }
     r := setup(t)
     rdbClient := rdb.NewRDB(r)
     const pollInterval = time.Second
     s := newScheduler(rdbClient, pollInterval)
-    t1 := randomTask("gen_thumbnail", "default", nil)
-    t2 := randomTask("send_email", "default", nil)
-    t3 := randomTask("reindex", "default", nil)
-    t4 := randomTask("sync", "default", nil)
+    t1 := h.NewTaskMessage("gen_thumbnail", nil)
+    t2 := h.NewTaskMessage("send_email", nil)
+    t3 := h.NewTaskMessage("reindex", nil)
+    t4 := h.NewTaskMessage("sync", nil)
+    now := time.Now()

     tests := []struct {
-        initScheduled []scheduledTask     // scheduled queue initial state
-        initRetry     []scheduledTask     // retry queue initial state
+        initScheduled []h.ZSetEntry       // scheduled queue initial state
+        initRetry     []h.ZSetEntry       // retry queue initial state
         initQueue     []*base.TaskMessage // default queue initial state
         wait          time.Duration       // wait duration before checking for final state
         wantScheduled []*base.TaskMessage // schedule queue final state
@@ -34,12 +31,12 @@ func TestScheduler(t *testing.T) {
         wantQueue     []*base.TaskMessage // default queue final state
     }{
         {
-            initScheduled: []scheduledTask{
-                {t1, time.Now().Add(time.Hour)},
-                {t2, time.Now().Add(-2 * time.Second)},
+            initScheduled: []h.ZSetEntry{
+                {Msg: t1, Score: now.Add(time.Hour).Unix()},
+                {Msg: t2, Score: now.Add(-2 * time.Second).Unix()},
             },
-            initRetry: []scheduledTask{
-                {t3, time.Now().Add(-500 * time.Millisecond)},
+            initRetry: []h.ZSetEntry{
+                {Msg: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()},
             },
             initQueue: []*base.TaskMessage{t4},
             wait:      pollInterval * 2,
@@ -48,12 +45,12 @@ func TestScheduler(t *testing.T) {
             wantQueue: []*base.TaskMessage{t2, t3, t4},
         },
         {
-            initScheduled: []scheduledTask{
-                {t1, time.Now()},
-                {t2, time.Now().Add(-2 * time.Second)},
-                {t3, time.Now().Add(-500 * time.Millisecond)},
+            initScheduled: []h.ZSetEntry{
+                {Msg: t1, Score: now.Unix()},
+                {Msg: t2, Score: now.Add(-2 * time.Second).Unix()},
+                {Msg: t3, Score: now.Add(-500 * time.Millisecond).Unix()},
             },
-            initRetry: []scheduledTask{},
+            initRetry:     []h.ZSetEntry{},
             initQueue:     []*base.TaskMessage{t4},
             wait:          pollInterval * 2,
             wantScheduled: []*base.TaskMessage{},
@@ -63,54 +60,27 @@ func TestScheduler(t *testing.T) {
     }

     for _, tc := range tests {
-        // clean up db before each test case.
-        if err := r.FlushDB().Err(); err != nil {
-            t.Fatal(err)
-        }
-        // initialize scheduled queue
-        for _, st := range tc.initScheduled {
-            err := rdbClient.Schedule(st.msg, st.processAt)
-            if err != nil {
-                t.Fatal(err)
-            }
-        }
-        // initialize retry queue
-        for _, st := range tc.initRetry {
-            err := r.ZAdd(base.RetryQueue, &redis.Z{
-                Member: mustMarshal(t, st.msg),
-                Score:  float64(st.processAt.Unix()),
-            }).Err()
-            if err != nil {
-                t.Fatal(err)
-            }
-        }
-        // initialize default queue
-        for _, msg := range tc.initQueue {
-            err := rdbClient.Enqueue(msg)
-            if err != nil {
-                t.Fatal(err)
-            }
-        }
+        h.FlushDB(t, r)                              // clean up db before each test case.
+        h.SeedScheduledQueue(t, r, tc.initScheduled) // initialize scheduled queue
+        h.SeedRetryQueue(t, r, tc.initRetry)         // initialize retry queue
+        h.SeedDefaultQueue(t, r, tc.initQueue)       // initialize default queue

         s.start()
         time.Sleep(tc.wait)
         s.terminate()

-        gotScheduledRaw := r.ZRange(base.ScheduledQueue, 0, -1).Val()
-        gotScheduled := mustUnmarshalSlice(t, gotScheduledRaw)
-        if diff := cmp.Diff(tc.wantScheduled, gotScheduled, sortMsgOpt); diff != "" {
+        gotScheduled := h.GetScheduledMessages(t, r)
+        if diff := cmp.Diff(tc.wantScheduled, gotScheduled, h.SortMsgOpt); diff != "" {
             t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.ScheduledQueue, diff)
         }

-        gotRetryRaw := r.ZRange(base.RetryQueue, 0, -1).Val()
-        gotRetry := mustUnmarshalSlice(t, gotRetryRaw)
-        if diff := cmp.Diff(tc.wantRetry, gotRetry, sortMsgOpt); diff != "" {
+        gotRetry := h.GetRetryMessages(t, r)
+        if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortMsgOpt); diff != "" {
             t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.RetryQueue, diff)
         }

-        gotQueueRaw := r.LRange(base.DefaultQueue, 0, -1).Val()
-        gotQueue := mustUnmarshalSlice(t, gotQueueRaw)
-        if diff := cmp.Diff(tc.wantQueue, gotQueue, sortMsgOpt); diff != "" {
+        gotEnqueued := h.GetEnqueuedMessages(t, r)
+        if diff := cmp.Diff(tc.wantQueue, gotEnqueued, h.SortMsgOpt); diff != "" {
             t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.DefaultQueue, diff)
         }
     }