2
0
mirror of https://github.com/hibiken/asynq.git synced 2024-12-26 15:52:18 +08:00

Merge pull request #15 from hibiken/refactor/testing

Refactor tests
This commit is contained in:
Ken Hibino 2019-12-14 13:58:33 -08:00 committed by GitHub
commit 1857d12cea
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 506 additions and 685 deletions

View File

@ -4,12 +4,11 @@ import "github.com/go-redis/redis/v7"
/* /*
TODOs: TODOs:
- [P0] asynqmon del <taskID>, asynqmon delall <qname>
- [P0] asynqmon kill <taskID>, asynqmon killall <qname> - [P0] asynqmon kill <taskID>, asynqmon killall <qname>
- [P0] Test refactor - helpers to initialize queues and read queue contents - [P0] Assigning int or any number type to Payload will be converted to float64 in handler
- [P0] Redis Memory Usage, Connection info in stats - [P0] Redis Memory Usage, Connection info in stats
- [P0] Processed, Failed count for today - [P0] Processed, Failed count for today
- [P0] Go docs + CONTRIBUTION.md + Github issue template - [P0] Go docs + CONTRIBUTION.md + Github issue template + License comment
- [P0] Redis Sentinel support - [P0] Redis Sentinel support
- [P1] Add Support for multiple queues and priority - [P1] Add Support for multiple queues and priority
- [P1] User defined max-retry count - [P1] User defined max-retry count

View File

@ -0,0 +1,147 @@
package rdb
import (
	"encoding/json"
	"math/rand"
	"os"
	"sort"
	"strconv"
	"testing"
	"time"

	"github.com/go-redis/redis/v7"
	"github.com/google/go-cmp/cmp"
	"github.com/rs/xid"
)
// This file defines test helpers for tests in the rdb package.
// init seeds the pseudo-random number generator so helpers that rely on
// math/rand produce different values on each test run.
func init() {
	seed := time.Now().UnixNano()
	rand.Seed(seed)
}
// setup returns an RDB instance connected to a Redis database reserved
// for testing, flushed so that each test starts with a clean slate.
//
// By default it connects to localhost:6379 DB 13; the connection can be
// overridden with the ASYNQ_REDIS_ADDR and ASYNQ_REDIS_DB environment
// variables (resolving the TODO about ENV-based configuration).
func setup(t *testing.T) *RDB {
	t.Helper()
	addr := "localhost:6379"
	if v := os.Getenv("ASYNQ_REDIS_ADDR"); v != "" {
		addr = v
	}
	db := 13
	if v := os.Getenv("ASYNQ_REDIS_DB"); v != "" {
		n, err := strconv.Atoi(v)
		if err != nil {
			t.Fatalf("invalid ASYNQ_REDIS_DB %q: %v", v, err)
		}
		db = n
	}
	r := NewRDB(redis.NewClient(&redis.Options{
		Addr: addr,
		DB:   db,
	}))
	// Start each test with a clean slate.
	flushDB(t, r)
	return r
}
// flushDB deletes all keys in the currently selected Redis database,
// failing the test immediately if the command errors.
func flushDB(t *testing.T, r *RDB) {
	t.Helper()
	err := r.client.FlushDB().Err()
	if err != nil {
		t.Fatal(err)
	}
}
// sortMsgOpt is a cmp option that sorts task messages by ID so that
// slice comparisons in tests are order-insensitive.
var sortMsgOpt = cmp.Transformer("SortMsg", func(in []*TaskMessage) []*TaskMessage {
	// Sort a copy so the caller's slice is never mutated.
	out := make([]*TaskMessage, len(in))
	copy(out, in)
	sort.Slice(out, func(i, j int) bool {
		return out[i].ID.String() < out[j].ID.String()
	})
	return out
})
// newTaskMessage returns a TaskMessage of the given type and payload
// with a fresh ID, destined for the "default" queue with 25 retries.
func newTaskMessage(taskType string, payload map[string]interface{}) *TaskMessage {
	msg := TaskMessage{
		ID:      xid.New(),
		Type:    taskType,
		Queue:   "default",
		Retry:   25,
		Payload: payload,
	}
	return &msg
}
// mustMarshal encodes msg as JSON, failing the test on error.
func mustMarshal(t *testing.T, msg *TaskMessage) string {
	t.Helper()
	b, err := json.Marshal(msg)
	if err != nil {
		t.Fatal(err)
	}
	return string(b)
}
// mustUnmarshal decodes a JSON string into a TaskMessage, failing the
// test on error.
func mustUnmarshal(t *testing.T, data string) *TaskMessage {
	t.Helper()
	msg := &TaskMessage{}
	if err := json.Unmarshal([]byte(data), msg); err != nil {
		t.Fatal(err)
	}
	return msg
}
// mustMarshalSlice encodes each message as JSON, failing the test on
// the first error. The result has the same order and length as msgs.
func mustMarshalSlice(t *testing.T, msgs []*TaskMessage) []string {
	t.Helper()
	// Pre-size the output: the final length is known up front.
	data := make([]string, 0, len(msgs))
	for _, m := range msgs {
		data = append(data, mustMarshal(t, m))
	}
	return data
}
// mustUnmarshalSlice decodes each JSON string into a TaskMessage,
// failing the test on the first error. The result has the same order
// and length as data.
func mustUnmarshalSlice(t *testing.T, data []string) []*TaskMessage {
	t.Helper()
	// Pre-size the output: the final length is known up front.
	msgs := make([]*TaskMessage, 0, len(data))
	for _, s := range data {
		msgs = append(msgs, mustUnmarshal(t, s))
	}
	return msgs
}
// seedRedisList initializes the Redis list at key with the given
// messages, serialized as JSON. Each message is LPUSHed individually,
// so elements end up in reverse order of msgs (tests compare with
// sortMsgOpt, so ordering is not significant).
func seedRedisList(t *testing.T, c *redis.Client, key string, msgs []*TaskMessage) {
	// t.Helper() was missing here, unlike the sibling seed helpers;
	// without it failure line numbers point at this helper, not the caller.
	t.Helper()
	data := mustMarshalSlice(t, msgs)
	for _, s := range data {
		if err := c.LPush(key, s).Err(); err != nil {
			t.Fatal(err)
		}
	}
}
// seedRedisZSet initializes the Redis sorted set at key with the given
// entries; each entry's message is stored as JSON with its score.
func seedRedisZSet(t *testing.T, c *redis.Client, key string, items []sortedSetEntry) {
	// t.Helper() was missing here, unlike the sibling seed helpers;
	// without it failure line numbers point at this helper, not the caller.
	t.Helper()
	for _, item := range items {
		z := &redis.Z{Member: mustMarshal(t, item.msg), Score: float64(item.score)}
		if err := c.ZAdd(key, z).Err(); err != nil {
			t.Fatal(err)
		}
	}
}
// sortedSetEntry represents an item in a redis sorted set (aka ZSET):
// a task message paired with its score.
type sortedSetEntry struct {
	msg   *TaskMessage
	score int64
}
// seedDefaultQueue initializes the default queue (defaultQ list) with
// the given messages.
func seedDefaultQueue(t *testing.T, r *RDB, msgs []*TaskMessage) {
	t.Helper()
	seedRedisList(t, r.client, defaultQ, msgs)
}
// seedInProgressQueue initializes the in-progress queue (inProgressQ
// list) with the given messages.
func seedInProgressQueue(t *testing.T, r *RDB, msgs []*TaskMessage) {
	t.Helper()
	seedRedisList(t, r.client, inProgressQ, msgs)
}
// seedScheduledQueue initializes the scheduled queue (scheduledQ sorted
// set) with the given entries.
func seedScheduledQueue(t *testing.T, r *RDB, entries []sortedSetEntry) {
	t.Helper()
	seedRedisZSet(t, r.client, scheduledQ, entries)
}
// seedRetryQueue initializes the retry queue (retryQ sorted set) with
// the given entries.
func seedRetryQueue(t *testing.T, r *RDB, entries []sortedSetEntry) {
	t.Helper()
	seedRedisZSet(t, r.client, retryQ, entries)
}
// seedDeadQueue initializes the dead queue (deadQ sorted set) with the
// given entries.
func seedDeadQueue(t *testing.T, r *RDB, entries []sortedSetEntry) {
	t.Helper()
	seedRedisZSet(t, r.client, deadQ, entries)
}

File diff suppressed because it is too large Load Diff

View File

@ -1,118 +1,37 @@
package rdb package rdb
import ( import (
"encoding/json"
"fmt" "fmt"
"math/rand"
"sort"
"testing" "testing"
"time" "time"
"github.com/go-redis/redis/v7"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/rs/xid"
) )
func init() {
rand.Seed(time.Now().UnixNano())
}
func setup(t *testing.T) *RDB {
t.Helper()
r := NewRDB(redis.NewClient(&redis.Options{
Addr: "localhost:6379",
DB: 13,
}))
// Start each test with a clean slate.
if err := r.client.FlushDB().Err(); err != nil {
panic(err)
}
return r
}
var sortMsgOpt = cmp.Transformer("SortMsg", func(in []*TaskMessage) []*TaskMessage {
out := append([]*TaskMessage(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool {
return out[i].ID.String() < out[j].ID.String()
})
return out
})
func randomTask(taskType, qname string, payload map[string]interface{}) *TaskMessage {
return &TaskMessage{
ID: xid.New(),
Type: taskType,
Queue: qname,
Retry: 25,
Payload: make(map[string]interface{}),
}
}
func mustMarshal(t *testing.T, task *TaskMessage) string {
t.Helper()
data, err := json.Marshal(task)
if err != nil {
t.Fatal(err)
}
return string(data)
}
func mustUnmarshal(t *testing.T, data string) *TaskMessage {
t.Helper()
var task TaskMessage
err := json.Unmarshal([]byte(data), &task)
if err != nil {
t.Fatal(err)
}
return &task
}
func mustMarshalSlice(t *testing.T, tasks []*TaskMessage) []string {
t.Helper()
var data []string
for _, task := range tasks {
data = append(data, mustMarshal(t, task))
}
return data
}
func mustUnmarshalSlice(t *testing.T, data []string) []*TaskMessage {
t.Helper()
var tasks []*TaskMessage
for _, s := range data {
tasks = append(tasks, mustUnmarshal(t, s))
}
return tasks
}
func TestEnqueue(t *testing.T) { func TestEnqueue(t *testing.T) {
r := setup(t) r := setup(t)
tests := []struct { tests := []struct {
msg *TaskMessage msg *TaskMessage
}{ }{
{msg: randomTask("send_email", "default", {msg: newTaskMessage("send_email", map[string]interface{}{"to": "exampleuser@gmail.com", "from": "noreply@example.com"})},
map[string]interface{}{"to": "exampleuser@gmail.com", "from": "noreply@example.com"})}, {msg: newTaskMessage("generate_csv", map[string]interface{}{})},
{msg: randomTask("generate_csv", "default", {msg: newTaskMessage("sync", nil)},
map[string]interface{}{})},
{msg: randomTask("sync", "default", nil)},
} }
for _, tc := range tests { for _, tc := range tests {
// clean up db before each test case. flushDB(t, r) // clean up db before each test case.
if err := r.client.FlushDB().Err(); err != nil {
t.Fatal(err)
}
err := r.Enqueue(tc.msg) err := r.Enqueue(tc.msg)
if err != nil { if err != nil {
t.Error(err) t.Errorf("(*RDB).Enqueue = %v, want nil", err)
continue continue
} }
res := r.client.LRange(defaultQ, 0, -1).Val() res := r.client.LRange(defaultQ, 0, -1).Val()
if len(res) != 1 { if len(res) != 1 {
t.Errorf("LIST %q has length %d, want 1", defaultQ, len(res)) t.Errorf("%q has length %d, want 1", defaultQ, len(res))
continue continue
} }
if diff := cmp.Diff(*tc.msg, *mustUnmarshal(t, res[0])); diff != "" { if diff := cmp.Diff(tc.msg, mustUnmarshal(t, res[0])); diff != "" {
t.Errorf("persisted data differed from the original input (-want, +got)\n%s", diff) t.Errorf("persisted data differed from the original input (-want, +got)\n%s", diff)
} }
} }
@ -120,25 +39,21 @@ func TestEnqueue(t *testing.T) {
func TestDequeue(t *testing.T) { func TestDequeue(t *testing.T) {
r := setup(t) r := setup(t)
t1 := randomTask("send_email", "default", map[string]interface{}{"subject": "hello!"}) t1 := newTaskMessage("send_email", map[string]interface{}{"subject": "hello!"})
tests := []struct { tests := []struct {
queued []*TaskMessage enqueued []*TaskMessage
want *TaskMessage want *TaskMessage
err error err error
inProgress int64 // length of "in-progress" tasks after dequeue inProgress int64 // length of "in-progress" tasks after dequeue
}{ }{
{queued: []*TaskMessage{t1}, want: t1, err: nil, inProgress: 1}, {enqueued: []*TaskMessage{t1}, want: t1, err: nil, inProgress: 1},
{queued: []*TaskMessage{}, want: nil, err: ErrDequeueTimeout, inProgress: 0}, {enqueued: []*TaskMessage{}, want: nil, err: ErrDequeueTimeout, inProgress: 0},
} }
for _, tc := range tests { for _, tc := range tests {
// clean up db before each test case. flushDB(t, r) // clean up db before each test case
if err := r.client.FlushDB().Err(); err != nil { seedDefaultQueue(t, r, tc.enqueued)
t.Fatal(err)
}
for _, m := range tc.queued {
r.Enqueue(m)
}
got, err := r.Dequeue(time.Second) got, err := r.Dequeue(time.Second)
if !cmp.Equal(got, tc.want) || err != tc.err { if !cmp.Equal(got, tc.want) || err != tc.err {
t.Errorf("(*RDB).Dequeue(time.Second) = %v, %v; want %v, %v", t.Errorf("(*RDB).Dequeue(time.Second) = %v, %v; want %v, %v",
@ -146,65 +61,52 @@ func TestDequeue(t *testing.T) {
continue continue
} }
if l := r.client.LLen(inProgressQ).Val(); l != tc.inProgress { if l := r.client.LLen(inProgressQ).Val(); l != tc.inProgress {
t.Errorf("LIST %q has length %d, want %d", inProgressQ, l, tc.inProgress) t.Errorf("%q has length %d, want %d", inProgressQ, l, tc.inProgress)
} }
} }
} }
func TestDone(t *testing.T) { func TestDone(t *testing.T) {
r := setup(t) r := setup(t)
t1 := randomTask("send_email", "default", nil) t1 := newTaskMessage("send_email", nil)
t2 := randomTask("export_csv", "csv", nil) t2 := newTaskMessage("export_csv", nil)
tests := []struct { tests := []struct {
initial []*TaskMessage // initial state of the in-progress list inProgress []*TaskMessage // initial state of the in-progress list
target *TaskMessage // task to remove target *TaskMessage // task to remove
final []*TaskMessage // final state of the in-progress list wantInProgress []*TaskMessage // final state of the in-progress list
}{ }{
{ {
initial: []*TaskMessage{t1, t2}, inProgress: []*TaskMessage{t1, t2},
target: t1, target: t1,
final: []*TaskMessage{t2}, wantInProgress: []*TaskMessage{t2},
}, },
{ {
initial: []*TaskMessage{t2}, inProgress: []*TaskMessage{t2},
target: t1, target: t1,
final: []*TaskMessage{t2}, wantInProgress: []*TaskMessage{t2},
}, },
{ {
initial: []*TaskMessage{t1}, inProgress: []*TaskMessage{t1},
target: t1, target: t1,
final: []*TaskMessage{}, wantInProgress: []*TaskMessage{},
}, },
} }
for _, tc := range tests { for _, tc := range tests {
// clean up db before each test case. flushDB(t, r) // clean up db before each test case
if err := r.client.FlushDB().Err(); err != nil { seedInProgressQueue(t, r, tc.inProgress)
t.Fatal(err)
}
// set up initial state
for _, task := range tc.initial {
err := r.client.LPush(inProgressQ, mustMarshal(t, task)).Err()
if err != nil {
t.Fatal(err)
}
}
err := r.Done(tc.target) err := r.Done(tc.target)
if err != nil { if err != nil {
t.Error(err) t.Errorf("(*RDB).Done(task) = %v, want nil", err)
continue continue
} }
var got []*TaskMessage
data := r.client.LRange(inProgressQ, 0, -1).Val() data := r.client.LRange(inProgressQ, 0, -1).Val()
for _, s := range data { gotInProgress := mustUnmarshalSlice(t, data)
got = append(got, mustUnmarshal(t, s)) if diff := cmp.Diff(tc.wantInProgress, gotInProgress, sortMsgOpt); diff != "" {
} t.Errorf("mismatch found in %q after calling (*RDB).Done: (-want, +got):\n%s", inProgressQ, diff)
if diff := cmp.Diff(tc.final, got, sortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after calling (*rdb).remove: (-want, +got):\n%s", defaultQ, diff)
continue continue
} }
} }
@ -212,33 +114,24 @@ func TestDone(t *testing.T) {
func TestKill(t *testing.T) { func TestKill(t *testing.T) {
r := setup(t) r := setup(t)
t1 := randomTask("send_email", "default", nil) t1 := newTaskMessage("send_email", nil)
// TODO(hibiken): add test cases for trimming // TODO(hibiken): add test cases for trimming
tests := []struct { tests := []struct {
initial []*TaskMessage // initial state of "dead" set dead []sortedSetEntry // initial state of dead queue
target *TaskMessage // task to kill target *TaskMessage // task to kill
want []*TaskMessage // final state of "dead" set wantDead []*TaskMessage // final state of dead queue
}{ }{
{ {
initial: []*TaskMessage{}, dead: []sortedSetEntry{},
target: t1, target: t1,
want: []*TaskMessage{t1}, wantDead: []*TaskMessage{t1},
}, },
} }
for _, tc := range tests { for _, tc := range tests {
// clean up db before each test case. flushDB(t, r) // clean up db before each test case
if err := r.client.FlushDB().Err(); err != nil { seedDeadQueue(t, r, tc.dead)
t.Fatal(err)
}
// set up initial state
for _, task := range tc.initial {
err := r.client.ZAdd(deadQ, &redis.Z{Member: mustMarshal(t, task), Score: float64(time.Now().Unix())}).Err()
if err != nil {
t.Fatal(err)
}
}
err := r.Kill(tc.target) err := r.Kill(tc.target)
if err != nil { if err != nil {
@ -246,10 +139,10 @@ func TestKill(t *testing.T) {
continue continue
} }
actual := r.client.ZRange(deadQ, 0, -1).Val() data := r.client.ZRange(deadQ, 0, -1).Val()
got := mustUnmarshalSlice(t, actual) gotDead := mustUnmarshalSlice(t, data)
if diff := cmp.Diff(tc.want, got, sortMsgOpt); diff != "" { if diff := cmp.Diff(tc.wantDead, gotDead, sortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after calling (*rdb).kill: (-want, +got):\n%s", deadQ, diff) t.Errorf("mismatch found in %q after calling (*RDB).Kill: (-want, +got):\n%s", deadQ, diff)
continue continue
} }
} }
@ -257,64 +150,54 @@ func TestKill(t *testing.T) {
func TestRestoreUnfinished(t *testing.T) { func TestRestoreUnfinished(t *testing.T) {
r := setup(t) r := setup(t)
t1 := randomTask("send_email", "default", nil) t1 := newTaskMessage("send_email", nil)
t2 := randomTask("export_csv", "csv", nil) t2 := newTaskMessage("export_csv", nil)
t3 := randomTask("sync_stuff", "sync", nil) t3 := newTaskMessage("sync_stuff", nil)
tests := []struct { tests := []struct {
beforeSrc []*TaskMessage inProgress []*TaskMessage
beforeDst []*TaskMessage enqueued []*TaskMessage
afterSrc []*TaskMessage wantInProgress []*TaskMessage
afterDst []*TaskMessage wantEnqueued []*TaskMessage
}{ }{
{ {
beforeSrc: []*TaskMessage{t1, t2, t3}, inProgress: []*TaskMessage{t1, t2, t3},
beforeDst: []*TaskMessage{}, enqueued: []*TaskMessage{},
afterSrc: []*TaskMessage{}, wantInProgress: []*TaskMessage{},
afterDst: []*TaskMessage{t1, t2, t3}, wantEnqueued: []*TaskMessage{t1, t2, t3},
}, },
{ {
beforeSrc: []*TaskMessage{}, inProgress: []*TaskMessage{},
beforeDst: []*TaskMessage{t1, t2, t3}, enqueued: []*TaskMessage{t1, t2, t3},
afterSrc: []*TaskMessage{}, wantInProgress: []*TaskMessage{},
afterDst: []*TaskMessage{t1, t2, t3}, wantEnqueued: []*TaskMessage{t1, t2, t3},
}, },
{ {
beforeSrc: []*TaskMessage{t2, t3}, inProgress: []*TaskMessage{t2, t3},
beforeDst: []*TaskMessage{t1}, enqueued: []*TaskMessage{t1},
afterSrc: []*TaskMessage{}, wantInProgress: []*TaskMessage{},
afterDst: []*TaskMessage{t1, t2, t3}, wantEnqueued: []*TaskMessage{t1, t2, t3},
}, },
} }
for _, tc := range tests { for _, tc := range tests {
// clean up db before each test case. flushDB(t, r) // clean up db before each test case
if err := r.client.FlushDB().Err(); err != nil { seedInProgressQueue(t, r, tc.inProgress)
t.Error(err) seedDefaultQueue(t, r, tc.enqueued)
continue
}
// seed src list.
for _, msg := range tc.beforeSrc {
r.client.LPush(inProgressQ, mustMarshal(t, msg))
}
// seed dst list.
for _, msg := range tc.beforeDst {
r.client.LPush(defaultQ, mustMarshal(t, msg))
}
if err := r.RestoreUnfinished(); err != nil { if err := r.RestoreUnfinished(); err != nil {
t.Errorf("(*RDB).RestoreUnfinished() = %v, want nil", err) t.Errorf("(*RDB).RestoreUnfinished() = %v, want nil", err)
continue continue
} }
src := r.client.LRange(inProgressQ, 0, -1).Val() gotInProgressRaw := r.client.LRange(inProgressQ, 0, -1).Val()
gotSrc := mustUnmarshalSlice(t, src) gotInProgress := mustUnmarshalSlice(t, gotInProgressRaw)
if diff := cmp.Diff(tc.afterSrc, gotSrc, sortMsgOpt); diff != "" { if diff := cmp.Diff(tc.wantInProgress, gotInProgress, sortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q (-want, +got)\n%s", inProgressQ, diff) t.Errorf("mismatch found in %q (-want, +got)\n%s", inProgressQ, diff)
} }
dst := r.client.LRange(defaultQ, 0, -1).Val() gotEnqueuedRaw := r.client.LRange(defaultQ, 0, -1).Val()
gotDst := mustUnmarshalSlice(t, dst) gotEnqueued := mustUnmarshalSlice(t, gotEnqueuedRaw)
if diff := cmp.Diff(tc.afterDst, gotDst, sortMsgOpt); diff != "" { if diff := cmp.Diff(tc.wantEnqueued, gotEnqueued, sortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q (-want, +got)\n%s", defaultQ, diff) t.Errorf("mismatch found in %q (-want, +got)\n%s", defaultQ, diff)
} }
} }
@ -322,45 +205,45 @@ func TestRestoreUnfinished(t *testing.T) {
func TestCheckAndEnqueue(t *testing.T) { func TestCheckAndEnqueue(t *testing.T) {
r := setup(t) r := setup(t)
t1 := randomTask("send_email", "default", nil) t1 := newTaskMessage("send_email", nil)
t2 := randomTask("generate_csv", "default", nil) t2 := newTaskMessage("generate_csv", nil)
t3 := randomTask("gen_thumbnail", "default", nil) t3 := newTaskMessage("gen_thumbnail", nil)
secondAgo := time.Now().Add(-time.Second) secondAgo := time.Now().Add(-time.Second)
hourFromNow := time.Now().Add(time.Hour) hourFromNow := time.Now().Add(time.Hour)
tests := []struct { tests := []struct {
initScheduled []*redis.Z // tasks to be processed later scheduled []sortedSetEntry
initRetry []*redis.Z // tasks to be retired later retry []sortedSetEntry
wantQueued []*TaskMessage // queue after calling forward wantQueued []*TaskMessage
wantScheduled []*TaskMessage // tasks in scheduled queue after calling the method wantScheduled []*TaskMessage
wantRetry []*TaskMessage // tasks in retry queue after calling the method wantRetry []*TaskMessage
}{ }{
{ {
initScheduled: []*redis.Z{ scheduled: []sortedSetEntry{
&redis.Z{Member: mustMarshal(t, t1), Score: float64(secondAgo.Unix())}, {t1, secondAgo.Unix()},
&redis.Z{Member: mustMarshal(t, t2), Score: float64(secondAgo.Unix())}}, {t2, secondAgo.Unix()}},
initRetry: []*redis.Z{ retry: []sortedSetEntry{
&redis.Z{Member: mustMarshal(t, t3), Score: float64(secondAgo.Unix())}}, {t3, secondAgo.Unix()}},
wantQueued: []*TaskMessage{t1, t2, t3}, wantQueued: []*TaskMessage{t1, t2, t3},
wantScheduled: []*TaskMessage{}, wantScheduled: []*TaskMessage{},
wantRetry: []*TaskMessage{}, wantRetry: []*TaskMessage{},
}, },
{ {
initScheduled: []*redis.Z{ scheduled: []sortedSetEntry{
&redis.Z{Member: mustMarshal(t, t1), Score: float64(hourFromNow.Unix())}, {t1, hourFromNow.Unix()},
&redis.Z{Member: mustMarshal(t, t2), Score: float64(secondAgo.Unix())}}, {t2, secondAgo.Unix()}},
initRetry: []*redis.Z{ retry: []sortedSetEntry{
&redis.Z{Member: mustMarshal(t, t3), Score: float64(secondAgo.Unix())}}, {t3, secondAgo.Unix()}},
wantQueued: []*TaskMessage{t2, t3}, wantQueued: []*TaskMessage{t2, t3},
wantScheduled: []*TaskMessage{t1}, wantScheduled: []*TaskMessage{t1},
wantRetry: []*TaskMessage{}, wantRetry: []*TaskMessage{},
}, },
{ {
initScheduled: []*redis.Z{ scheduled: []sortedSetEntry{
&redis.Z{Member: mustMarshal(t, t1), Score: float64(hourFromNow.Unix())}, {t1, hourFromNow.Unix()},
&redis.Z{Member: mustMarshal(t, t2), Score: float64(hourFromNow.Unix())}}, {t2, hourFromNow.Unix()}},
initRetry: []*redis.Z{ retry: []sortedSetEntry{
&redis.Z{Member: mustMarshal(t, t3), Score: float64(hourFromNow.Unix())}}, {t3, hourFromNow.Unix()}},
wantQueued: []*TaskMessage{}, wantQueued: []*TaskMessage{},
wantScheduled: []*TaskMessage{t1, t2}, wantScheduled: []*TaskMessage{t1, t2},
wantRetry: []*TaskMessage{t3}, wantRetry: []*TaskMessage{t3},
@ -368,18 +251,9 @@ func TestCheckAndEnqueue(t *testing.T) {
} }
for _, tc := range tests { for _, tc := range tests {
// clean up db before each test case. flushDB(t, r) // clean up db before each test case
if err := r.client.FlushDB().Err(); err != nil { seedScheduledQueue(t, r, tc.scheduled)
t.Fatal(err) seedRetryQueue(t, r, tc.retry)
}
if err := r.client.ZAdd(scheduledQ, tc.initScheduled...).Err(); err != nil {
t.Error(err)
continue
}
if err := r.client.ZAdd(retryQ, tc.initRetry...).Err(); err != nil {
t.Error(err)
continue
}
err := r.CheckAndEnqueue() err := r.CheckAndEnqueue()
if err != nil { if err != nil {
@ -389,12 +263,17 @@ func TestCheckAndEnqueue(t *testing.T) {
queued := r.client.LRange(defaultQ, 0, -1).Val() queued := r.client.LRange(defaultQ, 0, -1).Val()
gotQueued := mustUnmarshalSlice(t, queued) gotQueued := mustUnmarshalSlice(t, queued)
if diff := cmp.Diff(tc.wantQueued, gotQueued, sortMsgOpt); diff != "" { if diff := cmp.Diff(tc.wantQueued, gotQueued, sortMsgOpt); diff != "" {
t.Errorf("%q has %d tasks, want %d tasks; (-want, +got)\n%s", defaultQ, len(gotQueued), len(tc.wantQueued), diff) t.Errorf("mismatch found in %q; (-want, +got)\n%s", defaultQ, diff)
} }
scheduled := r.client.ZRangeByScore(scheduledQ, &redis.ZRangeBy{Min: "-inf", Max: "+inf"}).Val() scheduled := r.client.ZRange(scheduledQ, 0, -1).Val()
gotScheduled := mustUnmarshalSlice(t, scheduled) gotScheduled := mustUnmarshalSlice(t, scheduled)
if diff := cmp.Diff(tc.wantScheduled, gotScheduled, sortMsgOpt); diff != "" { if diff := cmp.Diff(tc.wantScheduled, gotScheduled, sortMsgOpt); diff != "" {
t.Errorf("%q has %d tasks, want %d tasks; (-want, +got)\n%s", scheduled, len(gotScheduled), len(tc.wantScheduled), diff) t.Errorf("mismatch found in %q; (-want, +got)\n%s", scheduledQ, diff)
}
retry := r.client.ZRange(retryQ, 0, -1).Val()
gotRetry := mustUnmarshalSlice(t, retry)
if diff := cmp.Diff(tc.wantRetry, gotRetry, sortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q; (-want, +got)\n%s", retryQ, diff)
} }
} }
} }
@ -406,35 +285,26 @@ func TestSchedule(t *testing.T) {
processAt time.Time processAt time.Time
}{ }{
{ {
randomTask("send_email", "default", map[string]interface{}{"subject": "hello"}), newTaskMessage("send_email", map[string]interface{}{"subject": "hello"}),
time.Now().Add(15 * time.Minute), time.Now().Add(15 * time.Minute),
}, },
} }
for _, tc := range tests { for _, tc := range tests {
// clean up db before each test case. flushDB(t, r) // clean up db before each test case
if err := r.client.FlushDB().Err(); err != nil {
t.Fatal(err)
}
err := r.Schedule(tc.msg, tc.processAt)
if err != nil {
t.Error(err)
continue
}
res, err := r.client.ZRangeWithScores(scheduledQ, 0, -1).Result()
if err != nil {
t.Error(err)
continue
}
desc := fmt.Sprintf("(*RDB).Schedule(%v, %v)", tc.msg, tc.processAt) desc := fmt.Sprintf("(*RDB).Schedule(%v, %v)", tc.msg, tc.processAt)
err := r.Schedule(tc.msg, tc.processAt)
if err != nil {
t.Errorf("%s = %v, want nil", desc, err)
continue
}
res := r.client.ZRangeWithScores(scheduledQ, 0, -1).Val()
if len(res) != 1 { if len(res) != 1 {
t.Errorf("%s inserted %d items to %q, want 1 items inserted", desc, len(res), scheduledQ) t.Errorf("%s inserted %d items to %q, want 1 items inserted", desc, len(res), scheduledQ)
continue continue
} }
if res[0].Score != float64(tc.processAt.Unix()) { if res[0].Score != float64(tc.processAt.Unix()) {
t.Errorf("%s inserted an item with score %f, want %f", desc, res[0].Score, float64(tc.processAt.Unix())) t.Errorf("%s inserted an item with score %f, want %f", desc, res[0].Score, float64(tc.processAt.Unix()))
continue continue
@ -449,35 +319,26 @@ func TestRetryLater(t *testing.T) {
processAt time.Time processAt time.Time
}{ }{
{ {
randomTask("send_email", "default", map[string]interface{}{"subject": "hello"}), newTaskMessage("send_email", map[string]interface{}{"subject": "hello"}),
time.Now().Add(15 * time.Minute), time.Now().Add(15 * time.Minute),
}, },
} }
for _, tc := range tests { for _, tc := range tests {
// clean up db before each test case. flushDB(t, r) // clean up db before each test case
if err := r.client.FlushDB().Err(); err != nil {
t.Fatal(err)
}
err := r.RetryLater(tc.msg, tc.processAt)
if err != nil {
t.Error(err)
continue
}
res, err := r.client.ZRangeWithScores(retryQ, 0, -1).Result()
if err != nil {
t.Error(err)
continue
}
desc := fmt.Sprintf("(*RDB).RetryLater(%v, %v)", tc.msg, tc.processAt) desc := fmt.Sprintf("(*RDB).RetryLater(%v, %v)", tc.msg, tc.processAt)
err := r.RetryLater(tc.msg, tc.processAt)
if err != nil {
t.Errorf("%s = %v, want nil", desc, err)
continue
}
res := r.client.ZRangeWithScores(retryQ, 0, -1).Val()
if len(res) != 1 { if len(res) != 1 {
t.Errorf("%s inserted %d items to %q, want 1 items inserted", desc, len(res), retryQ) t.Errorf("%s inserted %d items to %q, want 1 items inserted", desc, len(res), retryQ)
continue continue
} }
if res[0].Score != float64(tc.processAt.Unix()) { if res[0].Score != float64(tc.processAt.Unix()) {
t.Errorf("%s inserted an item with score %f, want %f", desc, res[0].Score, float64(tc.processAt.Unix())) t.Errorf("%s inserted an item with score %f, want %f", desc, res[0].Score, float64(tc.processAt.Unix()))
continue continue