From a28f61f313c2654717625e53cc195a72390228a3 Mon Sep 17 00:00:00 2001 From: Ken Hibino Date: Mon, 13 Jul 2020 06:29:41 -0700 Subject: [PATCH] Add Inspector type --- client_test.go | 16 +- inspector.go | 488 +++++++++ inspector_test.go | 1636 +++++++++++++++++++++++++++++++ internal/asynqtest/asynqtest.go | 51 +- internal/base/base.go | 6 + internal/rdb/inspect.go | 228 ++--- internal/rdb/inspect_test.go | 717 +++++++------- internal/rdb/rdb_test.go | 264 ++--- processor_test.go | 10 +- recoverer_test.go | 70 +- scheduler_test.go | 24 +- tools/asynq/cmd/del.go | 26 +- tools/asynq/cmd/delall.go | 19 +- tools/asynq/cmd/enq.go | 26 +- tools/asynq/cmd/enqall.go | 18 +- tools/asynq/cmd/history.go | 10 +- tools/asynq/cmd/kill.go | 24 +- tools/asynq/cmd/killall.go | 16 +- tools/asynq/cmd/ls.go | 111 +-- tools/go.sum | 1 + 20 files changed, 2830 insertions(+), 931 deletions(-) create mode 100644 inspector.go create mode 100644 inspector_test.go diff --git a/client_test.go b/client_test.go index c461dff..18d27a0 100644 --- a/client_test.go +++ b/client_test.go @@ -36,7 +36,7 @@ func TestClientEnqueueAt(t *testing.T) { opts []Option wantRes *Result wantEnqueued map[string][]*base.TaskMessage - wantScheduled []h.ZSetEntry + wantScheduled []base.Z }{ { desc: "Process task immediately", @@ -75,9 +75,9 @@ func TestClientEnqueueAt(t *testing.T) { Deadline: noDeadline, }, wantEnqueued: nil, // db is flushed in setup so list does not exist hence nil - wantScheduled: []h.ZSetEntry{ + wantScheduled: []base.Z{ { - Msg: &base.TaskMessage{ + Message: &base.TaskMessage{ Type: task.Type, Payload: task.Payload.data, Retry: defaultMaxRetry, @@ -85,7 +85,7 @@ func TestClientEnqueueAt(t *testing.T) { Timeout: int64(defaultTimeout.Seconds()), Deadline: noDeadline.Unix(), }, - Score: float64(oneHourLater.Unix()), + Score: oneHourLater.Unix(), }, }, }, @@ -376,7 +376,7 @@ func TestClientEnqueueIn(t *testing.T) { opts []Option wantRes *Result wantEnqueued map[string][]*base.TaskMessage - 
wantScheduled []h.ZSetEntry + wantScheduled []base.Z }{ { desc: "schedule a task to be enqueued in one hour", @@ -390,9 +390,9 @@ func TestClientEnqueueIn(t *testing.T) { Deadline: noDeadline, }, wantEnqueued: nil, // db is flushed in setup so list does not exist hence nil - wantScheduled: []h.ZSetEntry{ + wantScheduled: []base.Z{ { - Msg: &base.TaskMessage{ + Message: &base.TaskMessage{ Type: task.Type, Payload: task.Payload.data, Retry: defaultMaxRetry, @@ -400,7 +400,7 @@ func TestClientEnqueueIn(t *testing.T) { Timeout: int64(defaultTimeout.Seconds()), Deadline: noDeadline.Unix(), }, - Score: float64(time.Now().Add(time.Hour).Unix()), + Score: time.Now().Add(time.Hour).Unix(), }, }, }, diff --git a/inspector.go b/inspector.go new file mode 100644 index 0000000..eaa2f15 --- /dev/null +++ b/inspector.go @@ -0,0 +1,488 @@ +// Copyright 2020 Kentaro Hibino. All rights reserved. +// Use of this source code is governed by a MIT license +// that can be found in the LICENSE file. + +package asynq + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/google/uuid" + "github.com/hibiken/asynq/internal/rdb" +) + +// Inspector is a client interface to inspect and mutate the state of +// queues and tasks. +type Inspector struct { + rdb *rdb.RDB +} + +// New returns a new instance of Inspector. +func NewInspector(r RedisConnOpt) *Inspector { + return &Inspector{ + rdb: rdb.NewRDB(createRedisClient(r)), + } +} + +// Stats represents a state of queues at a certain time. +type Stats struct { + Enqueued int + InProgress int + Scheduled int + Retry int + Dead int + Processed int + Failed int + Queues []*QueueInfo + Timestamp time.Time +} + +// QueueInfo holds information about a queue. +type QueueInfo struct { + // Name of the queue (e.g. "default", "critical"). + // Note: It doesn't include the prefix "asynq:queues:". + Name string + + // Paused indicates whether the queue is paused. + // If true, tasks in the queue should not be processed. 
+ Paused bool + + // Size is the number of tasks in the queue. + Size int +} + +// CurrentStats returns a current stats of the queues. +func (i *Inspector) CurrentStats() (*Stats, error) { + stats, err := i.rdb.CurrentStats() + if err != nil { + return nil, err + } + var qs []*QueueInfo + for _, q := range stats.Queues { + qs = append(qs, (*QueueInfo)(q)) + } + return &Stats{ + Enqueued: stats.Enqueued, + InProgress: stats.InProgress, + Scheduled: stats.Scheduled, + Retry: stats.Retry, + Dead: stats.Dead, + Processed: stats.Processed, + Failed: stats.Failed, + Queues: qs, + Timestamp: stats.Timestamp, + }, nil +} + +// DailyStats holds aggregate data for a given day. +type DailyStats struct { + Processed int + Failed int + Date time.Time +} + +// History returns a list of stats from the last n days. +func (i *Inspector) History(n int) ([]*DailyStats, error) { + stats, err := i.rdb.HistoricalStats(n) + if err != nil { + return nil, err + } + var res []*DailyStats + for _, s := range stats { + res = append(res, &DailyStats{ + Processed: s.Processed, + Failed: s.Failed, + Date: s.Time, + }) + } + return res, nil +} + +// EnqueuedTask is a task in a queue and is ready to be processed. +type EnqueuedTask struct { + *Task + ID string + Queue string +} + +// InProgressTask is a task that's currently being processed. +type InProgressTask struct { + *Task + ID string +} + +// ScheduledTask is a task scheduled to be processed in the future. +type ScheduledTask struct { + *Task + ID string + Queue string + NextEnqueueAt time.Time + + score int64 +} + +// RetryTask is a task scheduled to be retried in the future. +type RetryTask struct { + *Task + ID string + Queue string + NextEnqueueAt time.Time + MaxRetry int + Retried int + ErrorMsg string + // TODO: LastFailedAt time.Time + + score int64 +} + +// DeadTask is a task exhausted its retries. +// DeadTask won't be retried automatically. 
+type DeadTask struct { + *Task + ID string + Queue string + MaxRetry int + Retried int + LastFailedAt time.Time + ErrorMsg string + + score int64 +} + +// Key returns a key used to delete, enqueue, and kill the task. +func (t *ScheduledTask) Key() string { + return fmt.Sprintf("s:%v:%v", t.ID, t.score) +} + +// Key returns a key used to delete, enqueue, and kill the task. +func (t *RetryTask) Key() string { + return fmt.Sprintf("r:%v:%v", t.ID, t.score) +} + +// Key returns a key used to delete, enqueue, and kill the task. +func (t *DeadTask) Key() string { + return fmt.Sprintf("d:%v:%v", t.ID, t.score) +} + +// parseTaskKey parses a key string and returns each part of key with proper +// type if valid, otherwise it reports an error. +func parseTaskKey(key string) (id uuid.UUID, score int64, qtype string, err error) { + parts := strings.Split(key, ":") + if len(parts) != 3 { + return uuid.Nil, 0, "", fmt.Errorf("invalid id") + } + id, err = uuid.Parse(parts[1]) + if err != nil { + return uuid.Nil, 0, "", fmt.Errorf("invalid id") + } + score, err = strconv.ParseInt(parts[2], 10, 64) + if err != nil { + return uuid.Nil, 0, "", fmt.Errorf("invalid id") + } + qtype = parts[0] + if len(qtype) != 1 || !strings.Contains("srd", qtype) { + return uuid.Nil, 0, "", fmt.Errorf("invalid id") + } + return id, score, qtype, nil +} + +// ListOption specifies behavior of list operation. +type ListOption interface{} + +// Internal list option representations. +type ( + pageSizeOpt int + pageNumOpt int +) + +type listOption struct { + pageSize int + pageNum int +} + +const ( + // Page size used by default in list operation. + defaultPageSize = 30 + + // Page number used by default in list operation. 
+ defaultPageNum = 1 +) + +func composeListOptions(opts ...ListOption) listOption { + res := listOption{ + pageSize: defaultPageSize, + pageNum: defaultPageNum, + } + for _, opt := range opts { + switch opt := opt.(type) { + case pageSizeOpt: + res.pageSize = int(opt) + case pageNumOpt: + res.pageNum = int(opt) + default: + // ignore unexpected option + } + } + return res +} + +// PageSize returns an option to specify the page size for list operation. +// +// Negative page size is treated as zero. +func PageSize(n int) ListOption { + if n < 0 { + n = 0 + } + return pageSizeOpt(n) +} + +// Page returns an option to specify the page number for list operation. +// The value 1 fetches the first page. +// +// Negative page number is treated as one. +func Page(n int) ListOption { + if n < 0 { + n = 1 + } + return pageNumOpt(n) +} + +// ListScheduledTasks retrieves tasks in the specified queue. +// +// By default, it retrieves the first 30 tasks. +func (i *Inspector) ListEnqueuedTasks(qname string, opts ...ListOption) ([]*EnqueuedTask, error) { + opt := composeListOptions(opts...) + pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} + msgs, err := i.rdb.ListEnqueued(qname, pgn) + if err != nil { + return nil, err + } + var tasks []*EnqueuedTask + for _, m := range msgs { + tasks = append(tasks, &EnqueuedTask{ + Task: NewTask(m.Type, m.Payload), + ID: m.ID.String(), + Queue: m.Queue, + }) + } + return tasks, err +} + +// ListScheduledTasks retrieves tasks currently being processed. +// +// By default, it retrieves the first 30 tasks. +func (i *Inspector) ListInProgressTasks(opts ...ListOption) ([]*InProgressTask, error) { + opt := composeListOptions(opts...) 
+ pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} + msgs, err := i.rdb.ListInProgress(pgn) + if err != nil { + return nil, err + } + var tasks []*InProgressTask + for _, m := range msgs { + tasks = append(tasks, &InProgressTask{ + Task: NewTask(m.Type, m.Payload), + ID: m.ID.String(), + }) + } + return tasks, err +} + +// ListScheduledTasks retrieves tasks in scheduled state. +// Tasks are sorted by NextEnqueueAt field in ascending order. +// +// By default, it retrieves the first 30 tasks. +func (i *Inspector) ListScheduledTasks(opts ...ListOption) ([]*ScheduledTask, error) { + opt := composeListOptions(opts...) + pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} + zs, err := i.rdb.ListScheduled(pgn) + if err != nil { + return nil, err + } + var tasks []*ScheduledTask + for _, z := range zs { + enqueueAt := time.Unix(z.Score, 0) + t := NewTask(z.Message.Type, z.Message.Payload) + tasks = append(tasks, &ScheduledTask{ + Task: t, + ID: z.Message.ID.String(), + Queue: z.Message.Queue, + NextEnqueueAt: enqueueAt, + score: z.Score, + }) + } + return tasks, nil +} + +// ListScheduledTasks retrieves tasks in retry state. +// Tasks are sorted by NextEnqueueAt field in ascending order. +// +// By default, it retrieves the first 30 tasks. +func (i *Inspector) ListRetryTasks(opts ...ListOption) ([]*RetryTask, error) { + opt := composeListOptions(opts...) 
+ pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} + zs, err := i.rdb.ListRetry(pgn) + if err != nil { + return nil, err + } + var tasks []*RetryTask + for _, z := range zs { + enqueueAt := time.Unix(z.Score, 0) + t := NewTask(z.Message.Type, z.Message.Payload) + tasks = append(tasks, &RetryTask{ + Task: t, + ID: z.Message.ID.String(), + Queue: z.Message.Queue, + NextEnqueueAt: enqueueAt, + MaxRetry: z.Message.Retry, + Retried: z.Message.Retried, + // TODO: LastFailedAt: z.Message.LastFailedAt + ErrorMsg: z.Message.ErrorMsg, + score: z.Score, + }) + } + return tasks, nil +} + +// ListScheduledTasks retrieves tasks in retry state. +// Tasks are sorted by LastFailedAt field in descending order. +// +// By default, it retrieves the first 30 tasks. +func (i *Inspector) ListDeadTasks(opts ...ListOption) ([]*DeadTask, error) { + opt := composeListOptions(opts...) + pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} + zs, err := i.rdb.ListDead(pgn) + if err != nil { + return nil, err + } + var tasks []*DeadTask + for _, z := range zs { + failedAt := time.Unix(z.Score, 0) + t := NewTask(z.Message.Type, z.Message.Payload) + tasks = append(tasks, &DeadTask{ + Task: t, + ID: z.Message.ID.String(), + Queue: z.Message.Queue, + MaxRetry: z.Message.Retry, + Retried: z.Message.Retried, + LastFailedAt: failedAt, + ErrorMsg: z.Message.ErrorMsg, + score: z.Score, + }) + } + return tasks, nil + return nil, nil +} + +// DeleteAllScheduledTasks deletes all tasks in scheduled state, +// and reports the number tasks deleted. +func (i *Inspector) DeleteAllScheduledTasks() (int, error) { + n, err := i.rdb.DeleteAllScheduledTasks() + return int(n), err +} + +// DeleteAllRetryTasks deletes all tasks in retry state, +// and reports the number tasks deleted. 
+func (i *Inspector) DeleteAllRetryTasks() (int, error) { + n, err := i.rdb.DeleteAllRetryTasks() + return int(n), err +} + +// DeleteAllDeadTasks deletes all tasks in dead state, +// and reports the number tasks deleted. +func (i *Inspector) DeleteAllDeadTasks() (int, error) { + n, err := i.rdb.DeleteAllDeadTasks() + return int(n), err +} + +// DeleteTaskByKey deletes a task with the given key. +func (i *Inspector) DeleteTaskByKey(key string) error { + id, score, qtype, err := parseTaskKey(key) + if err != nil { + return err + } + switch qtype { + case "s": + return i.rdb.DeleteScheduledTask(id, score) + case "r": + return i.rdb.DeleteRetryTask(id, score) + case "d": + return i.rdb.DeleteDeadTask(id, score) + default: + return fmt.Errorf("invalid key") + } +} + +// EnqueueAllScheduledTasks enqueues all tasks in the scheduled state, +// and reports the number of tasks enqueued. +func (i *Inspector) EnqueueAllScheduledTasks() (int, error) { + n, err := i.rdb.EnqueueAllScheduledTasks() + return int(n), err +} + +// EnqueueAllRetryTasks enqueues all tasks in the retry state, +// and reports the number of tasks enqueued. +func (i *Inspector) EnqueueAllRetryTasks() (int, error) { + n, err := i.rdb.EnqueueAllRetryTasks() + return int(n), err +} + +// EnqueueAllDeadTasks enqueues all tasks in the dead state, +// and reports the number of tasks enqueued. +func (i *Inspector) EnqueueAllDeadTasks() (int, error) { + n, err := i.rdb.EnqueueAllDeadTasks() + return int(n), err +} + +// EnqueueTaskByKey enqueues a task with the given key. 
+func (i *Inspector) EnqueueTaskByKey(key string) error { + id, score, qtype, err := parseTaskKey(key) + if err != nil { + return err + } + switch qtype { + case "s": + return i.rdb.EnqueueScheduledTask(id, score) + case "r": + return i.rdb.EnqueueRetryTask(id, score) + case "d": + return i.rdb.EnqueueDeadTask(id, score) + default: + return fmt.Errorf("invalid key") + } +} + +// KillAllScheduledTasks kills all tasks in scheduled state, +// and reports the number of tasks killed. +func (i *Inspector) KillAllScheduledTasks() (int, error) { + n, err := i.rdb.KillAllScheduledTasks() + return int(n), err +} + +// KillAllRetryTasks kills all tasks in retry state, +// and reports the number of tasks killed. +func (i *Inspector) KillAllRetryTasks() (int, error) { + n, err := i.rdb.KillAllRetryTasks() + return int(n), err +} + +// KillTaskByKey kills a task with the given key. +func (i *Inspector) KillTaskByKey(key string) error { + id, score, qtype, err := parseTaskKey(key) + if err != nil { + return err + } + switch qtype { + case "s": + return i.rdb.KillScheduledTask(id, score) + case "r": + return i.rdb.KillRetryTask(id, score) + case "d": + return fmt.Errorf("task already dead") + default: + return fmt.Errorf("invalid key") + } +} diff --git a/inspector_test.go b/inspector_test.go new file mode 100644 index 0000000..6ed420a --- /dev/null +++ b/inspector_test.go @@ -0,0 +1,1636 @@ +// Copyright 2020 Kentaro Hibino. All rights reserved. +// Use of this source code is governed by a MIT license +// that can be found in the LICENSE file. 
+ +package asynq + +import ( + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hibiken/asynq/internal/asynqtest" + h "github.com/hibiken/asynq/internal/asynqtest" + "github.com/hibiken/asynq/internal/base" +) + +func TestInspectorCurrentStats(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + m4 := asynqtest.NewTaskMessage("task4", nil) + m5 := asynqtest.NewTaskMessageWithQueue("task5", nil, "critical") + m6 := h.NewTaskMessageWithQueue("task6", nil, "low") + now := time.Now() + timeCmpOpt := cmpopts.EquateApproxTime(time.Second) + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + enqueued map[string][]*base.TaskMessage + inProgress []*base.TaskMessage + scheduled []base.Z + retry []base.Z + dead []base.Z + processed int + failed int + allQueues []interface{} + want *Stats + }{ + { + enqueued: map[string][]*base.TaskMessage{ + base.DefaultQueueName: {m1}, + "critical": {m5}, + "low": {m6}, + }, + inProgress: []*base.TaskMessage{m2}, + scheduled: []base.Z{ + {Message: m3, Score: now.Add(time.Hour).Unix()}, + {Message: m4, Score: now.Unix()}}, + retry: []base.Z{}, + dead: []base.Z{}, + processed: 120, + failed: 2, + allQueues: []interface{}{base.DefaultQueue, base.QueueKey("critical"), base.QueueKey("low")}, + want: &Stats{ + Enqueued: 3, + InProgress: 1, + Scheduled: 2, + Retry: 0, + Dead: 0, + Processed: 120, + Failed: 2, + Timestamp: now, + // Queues should be sorted by name. 
+ Queues: []*QueueInfo{ + {Name: "critical", Paused: false, Size: 1}, + {Name: "default", Paused: false, Size: 1}, + {Name: "low", Paused: false, Size: 1}, + }, + }, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedAllEnqueuedQueues(t, r, tc.enqueued) + asynqtest.SeedInProgressQueue(t, r, tc.inProgress) + asynqtest.SeedScheduledQueue(t, r, tc.scheduled) + asynqtest.SeedRetryQueue(t, r, tc.retry) + asynqtest.SeedDeadQueue(t, r, tc.dead) + processedKey := base.ProcessedKey(now) + failedKey := base.FailureKey(now) + r.Set(processedKey, tc.processed, 0) + r.Set(failedKey, tc.failed, 0) + r.SAdd(base.AllQueues, tc.allQueues...) + + got, err := inspector.CurrentStats() + if err != nil { + t.Errorf("r.CurrentStats() = %v, %v, want %v, nil", + got, err, tc.want) + continue + } + if diff := cmp.Diff(tc.want, got, timeCmpOpt); diff != "" { + t.Errorf("r.CurrentStats() = %v, %v, want %v, nil; (-want, +got)\n%s", + got, err, tc.want, diff) + continue + } + } + +} + +func TestInspectorHistory(t *testing.T) { + r := setup(t) + now := time.Now().UTC() + timeCmpOpt := cmpopts.EquateApproxTime(time.Second) + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + n int // number of days + }{ + {90}, + {7}, + {0}, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + + // populate last n days data + for i := 0; i < tc.n; i++ { + ts := now.Add(-time.Duration(i) * 24 * time.Hour) + processedKey := base.ProcessedKey(ts) + failedKey := base.FailureKey(ts) + r.Set(processedKey, (i+1)*1000, 0) + r.Set(failedKey, (i+1)*10, 0) + } + + got, err := inspector.History(tc.n) + if err != nil { + t.Errorf("Inspector.History(%d) returned error: %v", tc.n, err) + continue + } + if len(got) != tc.n { + t.Errorf("Inspector.History(%d) returned %d daily stats, want %d", + tc.n, len(got), tc.n) + continue + } + for i := 0; i < tc.n; i++ { + want := &DailyStats{ + Processed: (i + 1) * 1000, + Failed: (i + 1) * 
10, + Date: now.Add(-time.Duration(i) * 24 * time.Hour), + } + if diff := cmp.Diff(want, got[i], timeCmpOpt); diff != "" { + t.Errorf("Inspector.History %d days ago data; got %+v, want %+v; (-want,+got):\n%s", + i, got[i], want, diff) + } + } + } +} + +func createEnqueuedTask(msg *base.TaskMessage) *EnqueuedTask { + return &EnqueuedTask{ + Task: NewTask(msg.Type, msg.Payload), + ID: msg.ID.String(), + Queue: msg.Queue, + } +} + +func TestInspectorListEnqueuedTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + m4 := asynqtest.NewTaskMessage("task4", nil) + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + desc string + enqueued map[string][]*base.TaskMessage + qname string + want []*EnqueuedTask + }{ + { + desc: "with default queue", + enqueued: map[string][]*base.TaskMessage{ + "default": {m1, m2}, + }, + qname: "default", + want: []*EnqueuedTask{ + createEnqueuedTask(m1), + createEnqueuedTask(m2), + }, + }, + { + desc: "with named queue", + enqueued: map[string][]*base.TaskMessage{ + "default": {m1, m2}, + "critical": {m3}, + "low": {m4}, + }, + qname: "critical", + want: []*EnqueuedTask{ + createEnqueuedTask(m3), + }, + }, + { + desc: "with empty queue", + enqueued: map[string][]*base.TaskMessage{ + "default": {}, + }, + qname: "default", + want: []*EnqueuedTask(nil), + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + for q, msgs := range tc.enqueued { + asynqtest.SeedEnqueuedQueue(t, r, msgs, q) + } + + got, err := inspector.ListEnqueuedTasks(tc.qname) + if err != nil { + t.Errorf("%s; ListEnqueuedTasks(%q) returned error: %v", + tc.desc, tc.qname, err) + continue + } + ignoreOpt := cmpopts.IgnoreUnexported(Payload{}) + if diff := cmp.Diff(tc.want, got, ignoreOpt); diff != "" { + t.Errorf("%s; ListEnqueuedTasks(%q) = %v, want %v; (-want,+got)\n%s", + tc.desc, 
tc.qname, got, tc.want, diff) + } + } +} + +func TestInspectorListInProgressTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + createInProgressTask := func(msg *base.TaskMessage) *InProgressTask { + return &InProgressTask{ + Task: NewTask(msg.Type, msg.Payload), + ID: msg.ID.String(), + } + } + + tests := []struct { + desc string + inProgress []*base.TaskMessage + want []*InProgressTask + }{ + { + desc: "with a few in-progress tasks", + inProgress: []*base.TaskMessage{m1, m2}, + want: []*InProgressTask{ + createInProgressTask(m1), + createInProgressTask(m2), + }, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedInProgressQueue(t, r, tc.inProgress) + + got, err := inspector.ListInProgressTasks() + if err != nil { + t.Errorf("%s; ListInProgressTasks() returned error: %v", tc.desc, err) + continue + } + ignoreOpt := cmpopts.IgnoreUnexported(Payload{}) + if diff := cmp.Diff(tc.want, got, ignoreOpt); diff != "" { + t.Errorf("%s; ListInProgressTask() = %v, want %v; (-want,+got)\n%s", + tc.desc, got, tc.want, diff) + } + } +} + +func TestInspectorListScheduledTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + createScheduledTask := func(z base.Z) *ScheduledTask { + msg := z.Message + return &ScheduledTask{ + Task: NewTask(msg.Type, msg.Payload), + ID: msg.ID.String(), + Queue: msg.Queue, + NextEnqueueAt: time.Unix(z.Score, 0), + } + } + + 
tests := []struct { + desc string + scheduled []base.Z + want []*ScheduledTask + }{ + { + desc: "with a few scheduled tasks", + scheduled: []base.Z{z1, z2, z3}, + // Should be sorted by NextEnqueuedAt. + want: []*ScheduledTask{ + createScheduledTask(z3), + createScheduledTask(z1), + createScheduledTask(z2), + }, + }, + { + desc: "with empty scheduled queue", + scheduled: []base.Z{}, + want: []*ScheduledTask(nil), + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedScheduledQueue(t, r, tc.scheduled) + + got, err := inspector.ListScheduledTasks() + if err != nil { + t.Errorf("%s; ListScheduledTasks() returned error: %v", tc.desc, err) + continue + } + ignoreOpt := cmpopts.IgnoreUnexported(Payload{}, ScheduledTask{}) + if diff := cmp.Diff(tc.want, got, ignoreOpt); diff != "" { + t.Errorf("%s; ListScheduledTask() = %v, want %v; (-want,+got)\n%s", + tc.desc, got, tc.want, diff) + } + } +} + +func TestInspectorListRetryTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + createRetryTask := func(z base.Z) *RetryTask { + msg := z.Message + return &RetryTask{ + Task: NewTask(msg.Type, msg.Payload), + ID: msg.ID.String(), + Queue: msg.Queue, + NextEnqueueAt: time.Unix(z.Score, 0), + MaxRetry: msg.Retry, + Retried: msg.Retried, + ErrorMsg: msg.ErrorMsg, + } + } + + tests := []struct { + desc string + retry []base.Z + want []*RetryTask + }{ + { + desc: "with a few retry tasks", + retry: []base.Z{z1, z2, z3}, + // Should be sorted by NextEnqueuedAt. 
+ want: []*RetryTask{ + createRetryTask(z3), + createRetryTask(z1), + createRetryTask(z2), + }, + }, + { + desc: "with empty retry queue", + retry: []base.Z{}, + want: []*RetryTask(nil), + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedRetryQueue(t, r, tc.retry) + + got, err := inspector.ListRetryTasks() + if err != nil { + t.Errorf("%s; ListRetryTasks() returned error: %v", tc.desc, err) + continue + } + ignoreOpt := cmpopts.IgnoreUnexported(Payload{}, RetryTask{}) + if diff := cmp.Diff(tc.want, got, ignoreOpt); diff != "" { + t.Errorf("%s; ListRetryTask() = %v, want %v; (-want,+got)\n%s", + tc.desc, got, tc.want, diff) + } + } +} + +func TestInspectorListDeadTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(-5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(-15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(-2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + createDeadTask := func(z base.Z) *DeadTask { + msg := z.Message + return &DeadTask{ + Task: NewTask(msg.Type, msg.Payload), + ID: msg.ID.String(), + Queue: msg.Queue, + MaxRetry: msg.Retry, + Retried: msg.Retried, + LastFailedAt: time.Unix(z.Score, 0), + ErrorMsg: msg.ErrorMsg, + } + } + + tests := []struct { + desc string + retry []base.Z + want []*DeadTask + }{ + { + desc: "with a few dead tasks", + retry: []base.Z{z1, z2, z3}, + // Should be sorted by LastFailedAt. 
+ want: []*DeadTask{ + createDeadTask(z2), + createDeadTask(z1), + createDeadTask(z3), + }, + }, + { + desc: "with empty dead queue", + retry: []base.Z{}, + want: []*DeadTask(nil), + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedDeadQueue(t, r, tc.retry) + + got, err := inspector.ListDeadTasks() + if err != nil { + t.Errorf("%s; ListDeadTasks() returned error: %v", tc.desc, err) + continue + } + ignoreOpt := cmpopts.IgnoreUnexported(Payload{}, DeadTask{}) + if diff := cmp.Diff(tc.want, got, ignoreOpt); diff != "" { + t.Errorf("%s; ListDeadTask() = %v, want %v; (-want,+got)\n%s", + tc.desc, got, tc.want, diff) + } + } +} + +func TestInspectorListPagination(t *testing.T) { + // Create 100 tasks. + var msgs []*base.TaskMessage + for i := 0; i <= 99; i++ { + msgs = append(msgs, + asynqtest.NewTaskMessage(fmt.Sprintf("task%d", i), nil)) + } + r := setup(t) + asynqtest.SeedEnqueuedQueue(t, r, msgs) + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + page int + pageSize int + want []*EnqueuedTask + }{ + { + page: 1, + pageSize: 5, + want: []*EnqueuedTask{ + createEnqueuedTask(msgs[0]), + createEnqueuedTask(msgs[1]), + createEnqueuedTask(msgs[2]), + createEnqueuedTask(msgs[3]), + createEnqueuedTask(msgs[4]), + }, + }, + { + page: 3, + pageSize: 10, + want: []*EnqueuedTask{ + createEnqueuedTask(msgs[20]), + createEnqueuedTask(msgs[21]), + createEnqueuedTask(msgs[22]), + createEnqueuedTask(msgs[23]), + createEnqueuedTask(msgs[24]), + createEnqueuedTask(msgs[25]), + createEnqueuedTask(msgs[26]), + createEnqueuedTask(msgs[27]), + createEnqueuedTask(msgs[28]), + createEnqueuedTask(msgs[29]), + }, + }, + } + + for _, tc := range tests { + got, err := inspector.ListEnqueuedTasks("default", Page(tc.page), PageSize(tc.pageSize)) + if err != nil { + t.Errorf("ListEnqueuedTask('default') returned error: %v", err) + continue + } + ignoreOpt := cmpopts.IgnoreUnexported(Payload{}) + if diff := 
cmp.Diff(tc.want, got, ignoreOpt); diff != "" { + t.Errorf("ListEnqueuedTask('default') = %v, want %v; (-want,+got)\n%s", + got, tc.want, diff) + } + } +} + +func TestInspectorDeleteAllScheduledTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + scheduled []base.Z + want int + }{ + { + scheduled: []base.Z{z1, z2, z3}, + want: 3, + }, + { + scheduled: []base.Z{}, + want: 0, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedScheduledQueue(t, r, tc.scheduled) + + got, err := inspector.DeleteAllScheduledTasks() + if err != nil { + t.Errorf("DeleteAllScheduledTasks() returned error: %v", err) + continue + } + if got != tc.want { + t.Errorf("DeleteAllScheduledTasks() = %d, want %d", got, tc.want) + } + gotScheduled := asynqtest.GetScheduledEntries(t, r) + if len(gotScheduled) != 0 { + t.Errorf("There are still %d entries in dead queue, want empty", + len(gotScheduled)) + } + } +} + +func TestInspectorDeleteAllRetryTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + retry []base.Z + want int + }{ + { + retry: []base.Z{z1, z2, z3}, + 
want: 3, + }, + { + retry: []base.Z{}, + want: 0, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedRetryQueue(t, r, tc.retry) + + got, err := inspector.DeleteAllRetryTasks() + if err != nil { + t.Errorf("DeleteAllRetryTasks() returned error: %v", err) + continue + } + if got != tc.want { + t.Errorf("DeleteAllRetryTasks() = %d, want %d", got, tc.want) + } + gotRetry := asynqtest.GetRetryEntries(t, r) + if len(gotRetry) != 0 { + t.Errorf("There are still %d entries in dead queue, want empty", + len(gotRetry)) + } + } +} + +func TestInspectorDeleteAllDeadTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + dead []base.Z + want int + }{ + { + dead: []base.Z{z1, z2, z3}, + want: 3, + }, + { + dead: []base.Z{}, + want: 0, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedDeadQueue(t, r, tc.dead) + + got, err := inspector.DeleteAllDeadTasks() + if err != nil { + t.Errorf("DeleteAllDeadTasks() returned error: %v", err) + continue + } + if got != tc.want { + t.Errorf("DeleteAllDeadTasks() = %d, want %d", got, tc.want) + } + gotDead := asynqtest.GetDeadEntries(t, r) + if len(gotDead) != 0 { + t.Errorf("There are still %d entries in dead queue, want empty", + len(gotDead)) + } + } +} + +func TestInspectorKillAllScheduledTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 
* time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + scheduled []base.Z + dead []base.Z + want int + wantDead []base.Z + }{ + { + scheduled: []base.Z{z1, z2, z3}, + dead: []base.Z{}, + want: 3, + wantDead: []base.Z{ + base.Z{Message: m1, Score: now.Unix()}, + base.Z{Message: m2, Score: now.Unix()}, + base.Z{Message: m3, Score: now.Unix()}, + }, + }, + { + scheduled: []base.Z{z1, z2}, + dead: []base.Z{z3}, + want: 2, + wantDead: []base.Z{ + z3, + base.Z{Message: m1, Score: now.Unix()}, + base.Z{Message: m2, Score: now.Unix()}, + }, + }, + { + scheduled: []base.Z(nil), + dead: []base.Z(nil), + want: 0, + wantDead: []base.Z(nil), + }, + { + scheduled: []base.Z(nil), + dead: []base.Z{z1, z2}, + want: 0, + wantDead: []base.Z{z1, z2}, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedScheduledQueue(t, r, tc.scheduled) + asynqtest.SeedDeadQueue(t, r, tc.dead) + + got, err := inspector.KillAllScheduledTasks() + if err != nil { + t.Errorf("KillAllScheduledTasks() returned error: %v", err) + continue + } + if got != tc.want { + t.Errorf("KillAllScheduledTasks() = %d, want %d", got, tc.want) + } + gotScheduled := asynqtest.GetScheduledEntries(t, r) + if len(gotScheduled) != 0 { + t.Errorf("There are still %d entries in scheduled queue, want empty", + len(gotScheduled)) + } + gotDead := asynqtest.GetDeadEntries(t, r) + if diff := cmp.Diff(tc.wantDead, gotDead, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch in %q; (-want,+got)\n%s", base.DeadQueue, diff) + } + } +} + +func TestInspectorKillAllRetryTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: 
m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + retry []base.Z + dead []base.Z + want int + wantDead []base.Z + }{ + { + retry: []base.Z{z1, z2, z3}, + dead: []base.Z{}, + want: 3, + wantDead: []base.Z{ + base.Z{Message: m1, Score: now.Unix()}, + base.Z{Message: m2, Score: now.Unix()}, + base.Z{Message: m3, Score: now.Unix()}, + }, + }, + { + retry: []base.Z{z1, z2}, + dead: []base.Z{z3}, + want: 2, + wantDead: []base.Z{ + z3, + base.Z{Message: m1, Score: now.Unix()}, + base.Z{Message: m2, Score: now.Unix()}, + }, + }, + { + retry: []base.Z(nil), + dead: []base.Z(nil), + want: 0, + wantDead: []base.Z(nil), + }, + { + retry: []base.Z(nil), + dead: []base.Z{z1, z2}, + want: 0, + wantDead: []base.Z{z1, z2}, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedRetryQueue(t, r, tc.retry) + asynqtest.SeedDeadQueue(t, r, tc.dead) + + got, err := inspector.KillAllRetryTasks() + if err != nil { + t.Errorf("KillAllRetryTasks() returned error: %v", err) + continue + } + if got != tc.want { + t.Errorf("KillAllRetryTasks() = %d, want %d", got, tc.want) + } + gotRetry := asynqtest.GetRetryEntries(t, r) + if len(gotRetry) != 0 { + t.Errorf("There are still %d entries in retry queue, want empty", + len(gotRetry)) + } + gotDead := asynqtest.GetDeadEntries(t, r) + if diff := cmp.Diff(tc.wantDead, gotDead, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch in %q; (-want,+got)\n%s", base.DeadQueue, diff) + } + } +} + +func TestInspectorEnqueueAllScheduledTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessageWithQueue("task2", nil, "critical") + m3 := asynqtest.NewTaskMessageWithQueue("task3", nil, "low") + m4 := 
asynqtest.NewTaskMessage("task4", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + scheduled []base.Z + enqueued map[string][]*base.TaskMessage + want int + wantEnqueued map[string][]*base.TaskMessage + }{ + { + scheduled: []base.Z{z1, z2, z3}, + enqueued: map[string][]*base.TaskMessage{ + "default": {}, + "critical": {}, + "low": {}, + }, + want: 3, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {m1}, + "critical": {m2}, + "low": {m3}, + }, + }, + { + scheduled: []base.Z{z1, z2, z3}, + enqueued: map[string][]*base.TaskMessage{ + "default": {m4}, + "critical": {}, + "low": {}, + }, + want: 3, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {m4, m1}, + "critical": {m2}, + "low": {m3}, + }, + }, + { + scheduled: []base.Z{}, + enqueued: map[string][]*base.TaskMessage{ + "default": {m1, m4}, + }, + want: 0, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {m1, m4}, + }, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedScheduledQueue(t, r, tc.scheduled) + for q, msgs := range tc.enqueued { + asynqtest.SeedEnqueuedQueue(t, r, msgs, q) + } + + got, err := inspector.EnqueueAllScheduledTasks() + if err != nil { + t.Errorf("EnqueueAllScheduledTasks() returned error: %v", err) + continue + } + if got != tc.want { + t.Errorf("EnqueueAllScheduledTasks() = %d, want %d", got, tc.want) + } + gotScheduled := asynqtest.GetScheduledEntries(t, r) + if len(gotScheduled) != 0 { + t.Errorf("There are still %d entries in scheduled queue, want empty", + len(gotScheduled)) + } + for qname, want := range tc.wantEnqueued { + gotEnqueued := asynqtest.GetEnqueuedMessages(t, r, qname) + if diff := cmp.Diff(want, gotEnqueued, 
asynqtest.SortMsgOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff) + } + } + } +} + +func TestInspectorEnqueueAllRetryTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessageWithQueue("task2", nil, "critical") + m3 := asynqtest.NewTaskMessageWithQueue("task3", nil, "low") + m4 := asynqtest.NewTaskMessage("task2", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + retry []base.Z + enqueued map[string][]*base.TaskMessage + want int + wantEnqueued map[string][]*base.TaskMessage + }{ + { + retry: []base.Z{z1, z2, z3}, + enqueued: map[string][]*base.TaskMessage{ + "default": {}, + "critical": {}, + "low": {}, + }, + want: 3, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {m1}, + "critical": {m2}, + "low": {m3}, + }, + }, + { + retry: []base.Z{z1, z2, z3}, + enqueued: map[string][]*base.TaskMessage{ + "default": {m4}, + "critical": {}, + "low": {}, + }, + want: 3, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {m4, m1}, + "critical": {m2}, + "low": {m3}, + }, + }, + { + retry: []base.Z{}, + enqueued: map[string][]*base.TaskMessage{ + "default": {m1, m4}, + }, + want: 0, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {m1, m4}, + }, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedRetryQueue(t, r, tc.retry) + for q, msgs := range tc.enqueued { + asynqtest.SeedEnqueuedQueue(t, r, msgs, q) + } + + got, err := inspector.EnqueueAllRetryTasks() + if err != nil { + t.Errorf("EnqueueAllRetryTasks() returned error: %v", err) + continue + } + if got != tc.want { + t.Errorf("EnqueueAllRetryTasks() = %d, want %d", 
got, tc.want) + } + gotRetry := asynqtest.GetRetryEntries(t, r) + if len(gotRetry) != 0 { + t.Errorf("There are still %d entries in retry queue, want empty", + len(gotRetry)) + } + for qname, want := range tc.wantEnqueued { + gotEnqueued := asynqtest.GetEnqueuedMessages(t, r, qname) + if diff := cmp.Diff(want, gotEnqueued, asynqtest.SortMsgOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff) + } + } + } +} +func TestInspectorEnqueueAllDeadTasks(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessageWithQueue("task2", nil, "critical") + m3 := asynqtest.NewTaskMessageWithQueue("task3", nil, "low") + m4 := asynqtest.NewTaskMessage("task2", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(-5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(-15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(-2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + dead []base.Z + enqueued map[string][]*base.TaskMessage + want int + wantEnqueued map[string][]*base.TaskMessage + }{ + { + dead: []base.Z{z1, z2, z3}, + enqueued: map[string][]*base.TaskMessage{ + "default": {}, + "critical": {}, + "low": {}, + }, + want: 3, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {m1}, + "critical": {m2}, + "low": {m3}, + }, + }, + { + dead: []base.Z{z1, z2, z3}, + enqueued: map[string][]*base.TaskMessage{ + "default": {m4}, + "critical": {}, + "low": {}, + }, + want: 3, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {m4, m1}, + "critical": {m2}, + "low": {m3}, + }, + }, + { + dead: []base.Z{}, + enqueued: map[string][]*base.TaskMessage{ + "default": {m1, m4}, + }, + want: 0, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {m1, m4}, + }, + }, + } + + for _, tc := range tests { + asynqtest.FlushDB(t, r) + 
asynqtest.SeedDeadQueue(t, r, tc.dead) + for q, msgs := range tc.enqueued { + asynqtest.SeedEnqueuedQueue(t, r, msgs, q) + } + + got, err := inspector.EnqueueAllDeadTasks() + if err != nil { + t.Errorf("EnqueueAllDeadTasks() returned error: %v", err) + continue + } + if got != tc.want { + t.Errorf("EnqueueAllDeadTasks() = %d, want %d", got, tc.want) + } + gotDead := asynqtest.GetDeadEntries(t, r) + if len(gotDead) != 0 { + t.Errorf("There are still %d entries in dead queue, want empty", + len(gotDead)) + } + for qname, want := range tc.wantEnqueued { + gotEnqueued := asynqtest.GetEnqueuedMessages(t, r, qname) + if diff := cmp.Diff(want, gotEnqueued, asynqtest.SortMsgOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff) + } + } + } +} + +func TestInspectorDeleteTaskByKeyDeletesScheduledTask(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + scheduled []base.Z + target *base.TaskMessage + wantScheduled []base.Z + }{ + { + scheduled: []base.Z{z1, z2, z3}, + target: m2, + wantScheduled: []base.Z{z1, z3}, + }, + } + +loop: + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedScheduledQueue(t, r, tc.scheduled) + + tasks, err := inspector.ListScheduledTasks() + if err != nil { + t.Errorf("ListScheduledTasks() returned error: %v", err) + continue + } + for _, task := range tasks { + if task.ID == tc.target.ID.String() { + if err := inspector.DeleteTaskByKey(task.Key()); err != nil { + t.Errorf("DeleteTaskByKey(%q) returned error: %v", + task.Key(), err) + 
continue loop + } + } + } + gotScheduled := asynqtest.GetScheduledEntries(t, r) + if diff := cmp.Diff(tc.wantScheduled, gotScheduled, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", + base.ScheduledQueue, diff) + } + } +} + +func TestInspectorDeleteTaskByKeyDeletesRetryTask(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + retry []base.Z + target *base.TaskMessage + wantRetry []base.Z + }{ + { + retry: []base.Z{z1, z2, z3}, + target: m2, + wantRetry: []base.Z{z1, z3}, + }, + } + +loop: + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedRetryQueue(t, r, tc.retry) + + tasks, err := inspector.ListRetryTasks() + if err != nil { + t.Errorf("ListRetryTasks() returned error: %v", err) + continue + } + for _, task := range tasks { + if task.ID == tc.target.ID.String() { + if err := inspector.DeleteTaskByKey(task.Key()); err != nil { + t.Errorf("DeleteTaskByKey(%q) returned error: %v", + task.Key(), err) + continue loop + } + } + } + gotRetry := asynqtest.GetRetryEntries(t, r) + if diff := cmp.Diff(tc.wantRetry, gotRetry, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", + base.RetryQueue, diff) + } + } +} + +func TestInspectorDeleteTaskByKeyDeletesDeadTask(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessage("task2", nil) + m3 := asynqtest.NewTaskMessage("task3", nil) + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(-5 * time.Minute).Unix()} 
+ z2 := base.Z{Message: m2, Score: now.Add(-15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(-2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + dead []base.Z + target *base.TaskMessage + wantDead []base.Z + }{ + { + dead: []base.Z{z1, z2, z3}, + target: m2, + wantDead: []base.Z{z1, z3}, + }, + } + +loop: + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedDeadQueue(t, r, tc.dead) + + tasks, err := inspector.ListDeadTasks() + if err != nil { + t.Errorf("ListDeadTasks() returned error: %v", err) + continue + } + for _, task := range tasks { + if task.ID == tc.target.ID.String() { + if err := inspector.DeleteTaskByKey(task.Key()); err != nil { + t.Errorf("DeleteTaskByKey(%q) returned error: %v", + task.Key(), err) + continue loop + } + } + } + gotDead := asynqtest.GetDeadEntries(t, r) + if diff := cmp.Diff(tc.wantDead, gotDead, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", + base.DeadQueue, diff) + } + } +} + +func TestInspectorEnqueueTaskByKeyEnqueuesScheduledTask(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessageWithQueue("task2", nil, "critical") + m3 := asynqtest.NewTaskMessageWithQueue("task3", nil, "low") + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + scheduled []base.Z + enqueued map[string][]*base.TaskMessage + target *base.TaskMessage + wantScheduled []base.Z + wantEnqueued map[string][]*base.TaskMessage + }{ + { + scheduled: []base.Z{z1, z2, z3}, + enqueued: map[string][]*base.TaskMessage{ + "default": {}, + "critical": {}, + "low": {}, + }, + 
target: m2, + wantScheduled: []base.Z{z1, z3}, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {}, + "critical": {m2}, + "low": {}, + }, + }, + } + +loop: + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedScheduledQueue(t, r, tc.scheduled) + asynqtest.SeedAllEnqueuedQueues(t, r, tc.enqueued) + + tasks, err := inspector.ListScheduledTasks() + if err != nil { + t.Errorf("ListScheduledTasks() returned error: %v", err) + continue + } + for _, task := range tasks { + if task.ID == tc.target.ID.String() { + if err := inspector.EnqueueTaskByKey(task.Key()); err != nil { + t.Errorf("EnqueueTaskByKey(%q) returned error: %v", + task.Key(), err) + continue loop + } + } + } + gotScheduled := asynqtest.GetScheduledEntries(t, r) + if diff := cmp.Diff(tc.wantScheduled, gotScheduled, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", base.ScheduledQueue, diff) + } + for qname, want := range tc.wantEnqueued { + gotEnqueued := asynqtest.GetEnqueuedMessages(t, r, qname) + if diff := cmp.Diff(want, gotEnqueued, asynqtest.SortMsgOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff) + } + } + } +} + +func TestInspectorEnqueueTaskByKeyEnqueuesRetryTask(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessageWithQueue("task2", nil, "critical") + m3 := asynqtest.NewTaskMessageWithQueue("task3", nil, "low") + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + retry []base.Z + enqueued map[string][]*base.TaskMessage + target *base.TaskMessage + wantRetry []base.Z + wantEnqueued map[string][]*base.TaskMessage + }{ + { + retry: 
[]base.Z{z1, z2, z3}, + enqueued: map[string][]*base.TaskMessage{ + "default": {}, + "critical": {}, + "low": {}, + }, + target: m2, + wantRetry: []base.Z{z1, z3}, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {}, + "critical": {m2}, + "low": {}, + }, + }, + } + +loop: + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedRetryQueue(t, r, tc.retry) + asynqtest.SeedAllEnqueuedQueues(t, r, tc.enqueued) + + tasks, err := inspector.ListRetryTasks() + if err != nil { + t.Errorf("ListRetryTasks() returned error: %v", err) + continue + } + for _, task := range tasks { + if task.ID == tc.target.ID.String() { + if err := inspector.EnqueueTaskByKey(task.Key()); err != nil { + t.Errorf("EnqueueTaskByKey(%q) returned error: %v", + task.Key(), err) + continue loop + } + } + } + gotRetry := asynqtest.GetRetryEntries(t, r) + if diff := cmp.Diff(tc.wantRetry, gotRetry, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", base.RetryQueue, diff) + } + for qname, want := range tc.wantEnqueued { + gotEnqueued := asynqtest.GetEnqueuedMessages(t, r, qname) + if diff := cmp.Diff(want, gotEnqueued, asynqtest.SortMsgOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff) + } + } + } +} + +func TestInspectorEnqueueTaskByKeyEnqueuesDeadTask(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessageWithQueue("task2", nil, "critical") + m3 := asynqtest.NewTaskMessageWithQueue("task3", nil, "low") + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(-5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(-15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(-2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + dead []base.Z + enqueued map[string][]*base.TaskMessage + target *base.TaskMessage + wantDead 
[]base.Z + wantEnqueued map[string][]*base.TaskMessage + }{ + { + dead: []base.Z{z1, z2, z3}, + enqueued: map[string][]*base.TaskMessage{ + "default": {}, + "critical": {}, + "low": {}, + }, + target: m2, + wantDead: []base.Z{z1, z3}, + wantEnqueued: map[string][]*base.TaskMessage{ + "default": {}, + "critical": {m2}, + "low": {}, + }, + }, + } + +loop: + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedDeadQueue(t, r, tc.dead) + asynqtest.SeedAllEnqueuedQueues(t, r, tc.enqueued) + + tasks, err := inspector.ListDeadTasks() + if err != nil { + t.Errorf("ListDeadTasks() returned error: %v", err) + continue + } + for _, task := range tasks { + if task.ID == tc.target.ID.String() { + if err := inspector.EnqueueTaskByKey(task.Key()); err != nil { + t.Errorf("EnqueueTaskByKey(%q) returned error: %v", + task.Key(), err) + continue loop + } + } + } + gotDead := asynqtest.GetDeadEntries(t, r) + if diff := cmp.Diff(tc.wantDead, gotDead, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", base.DeadQueue, diff) + } + for qname, want := range tc.wantEnqueued { + gotEnqueued := asynqtest.GetEnqueuedMessages(t, r, qname) + if diff := cmp.Diff(want, gotEnqueued, asynqtest.SortMsgOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff) + } + } + } +} + +func TestInspectorKillTaskByKeyKillsScheduledTask(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessageWithQueue("task2", nil, "critical") + m3 := asynqtest.NewTaskMessageWithQueue("task3", nil, "low") + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + scheduled []base.Z + dead []base.Z + 
target *base.TaskMessage + wantScheduled []base.Z + wantDead []base.Z + }{ + { + scheduled: []base.Z{z1, z2, z3}, + dead: []base.Z{}, + target: m2, + wantScheduled: []base.Z{z1, z3}, + wantDead: []base.Z{ + {m2, now.Unix()}, + }, + }, + } + +loop: + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedScheduledQueue(t, r, tc.scheduled) + asynqtest.SeedDeadQueue(t, r, tc.dead) + + tasks, err := inspector.ListScheduledTasks() + if err != nil { + t.Errorf("ListScheduledTasks() returned error: %v", err) + continue + } + for _, task := range tasks { + if task.ID == tc.target.ID.String() { + if err := inspector.KillTaskByKey(task.Key()); err != nil { + t.Errorf("KillTaskByKey(%q) returned error: %v", + task.Key(), err) + continue loop + } + } + } + gotScheduled := asynqtest.GetScheduledEntries(t, r) + if diff := cmp.Diff(tc.wantScheduled, gotScheduled, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", base.ScheduledQueue, diff) + } + gotDead := asynqtest.GetDeadEntries(t, r) + if diff := cmp.Diff(tc.wantDead, gotDead, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", base.DeadQueue, diff) + } + + } +} + +func TestInspectorKillTaskByKeyKillsRetryTask(t *testing.T) { + r := setup(t) + m1 := asynqtest.NewTaskMessage("task1", nil) + m2 := asynqtest.NewTaskMessageWithQueue("task2", nil, "critical") + m3 := asynqtest.NewTaskMessageWithQueue("task3", nil, "low") + now := time.Now() + z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} + z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} + z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} + + inspector := NewInspector(RedisClientOpt{ + Addr: redisAddr, + DB: redisDB, + }) + + tests := []struct { + retry []base.Z + dead []base.Z + target *base.TaskMessage + wantRetry []base.Z + wantDead []base.Z + }{ + { + retry: []base.Z{z1, z2, z3}, + dead: []base.Z{}, + target: m2, + wantRetry: 
[]base.Z{z1, z3}, + wantDead: []base.Z{ + {m2, now.Unix()}, + }, + }, + } + +loop: + for _, tc := range tests { + asynqtest.FlushDB(t, r) + asynqtest.SeedRetryQueue(t, r, tc.retry) + asynqtest.SeedDeadQueue(t, r, tc.dead) + + tasks, err := inspector.ListRetryTasks() + if err != nil { + t.Errorf("ListRetryTasks() returned error: %v", err) + continue + } + for _, task := range tasks { + if task.ID == tc.target.ID.String() { + if err := inspector.KillTaskByKey(task.Key()); err != nil { + t.Errorf("KillTaskByKey(%q) returned error: %v", + task.Key(), err) + continue loop + } + } + } + gotRetry := asynqtest.GetRetryEntries(t, r) + if diff := cmp.Diff(tc.wantRetry, gotRetry, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", base.RetryQueue, diff) + } + gotDead := asynqtest.GetDeadEntries(t, r) + if diff := cmp.Diff(tc.wantDead, gotDead, asynqtest.SortZSetEntryOpt); diff != "" { + t.Errorf("mismatch found in %q; (-want,+got)\n%s", base.DeadQueue, diff) + } + + } +} diff --git a/internal/asynqtest/asynqtest.go b/internal/asynqtest/asynqtest.go index d811a46..23945ae 100644 --- a/internal/asynqtest/asynqtest.go +++ b/internal/asynqtest/asynqtest.go @@ -17,12 +17,6 @@ import ( "github.com/hibiken/asynq/internal/base" ) -// ZSetEntry is an entry in redis sorted set. -type ZSetEntry struct { - Msg *base.TaskMessage - Score float64 -} - // SortMsgOpt is a cmp.Option to sort base.TaskMessage for comparing slice of task messages. var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage) []*base.TaskMessage { out := append([]*base.TaskMessage(nil), in...) // Copy input to avoid mutating it @@ -33,10 +27,10 @@ var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage }) // SortZSetEntryOpt is an cmp.Option to sort ZSetEntry for comparing slice of zset entries. 
-var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []ZSetEntry) []ZSetEntry { - out := append([]ZSetEntry(nil), in...) // Copy input to avoid mutating it +var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []base.Z) []base.Z { + out := append([]base.Z(nil), in...) // Copy input to avoid mutating it sort.Slice(out, func(i, j int) bool { - return out[i].Msg.ID.String() < out[j].Msg.ID.String() + return out[i].Message.ID.String() < out[j].Message.ID.String() }) return out }) @@ -177,6 +171,15 @@ func SeedEnqueuedQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage, seedRedisList(tb, r, queue, msgs) } +// SeedAllEnqueuedQueues initializes all of the specified queues with the given messages. +// +// enqueued map maps a queue name a list of messages. +func SeedAllEnqueuedQueues(tb testing.TB, r *redis.Client, enqueued map[string][]*base.TaskMessage) { + for q, msgs := range enqueued { + SeedEnqueuedQueue(tb, r, msgs, q) + } +} + // SeedInProgressQueue initializes the in-progress queue with the given messages. func SeedInProgressQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage) { tb.Helper() @@ -184,25 +187,25 @@ func SeedInProgressQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessag } // SeedScheduledQueue initializes the scheduled queue with the given messages. -func SeedScheduledQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) { +func SeedScheduledQueue(tb testing.TB, r *redis.Client, entries []base.Z) { tb.Helper() seedRedisZSet(tb, r, base.ScheduledQueue, entries) } // SeedRetryQueue initializes the retry queue with the given messages. -func SeedRetryQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) { +func SeedRetryQueue(tb testing.TB, r *redis.Client, entries []base.Z) { tb.Helper() seedRedisZSet(tb, r, base.RetryQueue, entries) } // SeedDeadQueue initializes the dead queue with the given messages. 
-func SeedDeadQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) { +func SeedDeadQueue(tb testing.TB, r *redis.Client, entries []base.Z) { tb.Helper() seedRedisZSet(tb, r, base.DeadQueue, entries) } // SeedDeadlines initializes the deadlines set with the given entries. -func SeedDeadlines(tb testing.TB, r *redis.Client, entries []ZSetEntry) { +func SeedDeadlines(tb testing.TB, r *redis.Client, entries []base.Z) { tb.Helper() seedRedisZSet(tb, r, base.KeyDeadlines, entries) } @@ -216,9 +219,9 @@ func seedRedisList(tb testing.TB, c *redis.Client, key string, msgs []*base.Task } } -func seedRedisZSet(tb testing.TB, c *redis.Client, key string, items []ZSetEntry) { +func seedRedisZSet(tb testing.TB, c *redis.Client, key string, items []base.Z) { for _, item := range items { - z := &redis.Z{Member: MustMarshal(tb, item.Msg), Score: float64(item.Score)} + z := &redis.Z{Member: MustMarshal(tb, item.Message), Score: float64(item.Score)} if err := c.ZAdd(key, z).Err(); err != nil { tb.Fatal(err) } @@ -262,25 +265,25 @@ func GetDeadMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage { } // GetScheduledEntries returns all task messages and its score in the scheduled queue. -func GetScheduledEntries(tb testing.TB, r *redis.Client) []ZSetEntry { +func GetScheduledEntries(tb testing.TB, r *redis.Client) []base.Z { tb.Helper() return getZSetEntries(tb, r, base.ScheduledQueue) } // GetRetryEntries returns all task messages and its score in the retry queue. -func GetRetryEntries(tb testing.TB, r *redis.Client) []ZSetEntry { +func GetRetryEntries(tb testing.TB, r *redis.Client) []base.Z { tb.Helper() return getZSetEntries(tb, r, base.RetryQueue) } // GetDeadEntries returns all task messages and its score in the dead queue. 
-func GetDeadEntries(tb testing.TB, r *redis.Client) []ZSetEntry { +func GetDeadEntries(tb testing.TB, r *redis.Client) []base.Z { tb.Helper() return getZSetEntries(tb, r, base.DeadQueue) } // GetDeadlinesEntries returns all task messages and its score in the deadlines set. -func GetDeadlinesEntries(tb testing.TB, r *redis.Client) []ZSetEntry { +func GetDeadlinesEntries(tb testing.TB, r *redis.Client) []base.Z { tb.Helper() return getZSetEntries(tb, r, base.KeyDeadlines) } @@ -295,13 +298,13 @@ func getZSetMessages(tb testing.TB, r *redis.Client, zset string) []*base.TaskMe return MustUnmarshalSlice(tb, data) } -func getZSetEntries(tb testing.TB, r *redis.Client, zset string) []ZSetEntry { +func getZSetEntries(tb testing.TB, r *redis.Client, zset string) []base.Z { data := r.ZRangeWithScores(zset, 0, -1).Val() - var entries []ZSetEntry + var entries []base.Z for _, z := range data { - entries = append(entries, ZSetEntry{ - Msg: MustUnmarshal(tb, z.Member.(string)), - Score: z.Score, + entries = append(entries, base.Z{ + Message: MustUnmarshal(tb, z.Member.(string)), + Score: int64(z.Score), }) } return entries diff --git a/internal/base/base.go b/internal/base/base.go index 85c28d0..7277051 100644 --- a/internal/base/base.go +++ b/internal/base/base.go @@ -133,6 +133,12 @@ func DecodeMessage(s string) (*TaskMessage, error) { return &msg, nil } +// Z represents sorted set member. +type Z struct { + Message *TaskMessage + Score int64 +} + // ServerStatus represents status of a server. // ServerStatus methods are concurrency safe. type ServerStatus struct { diff --git a/internal/rdb/inspect.go b/internal/rdb/inspect.go index 158d22a..c7e5862 100644 --- a/internal/rdb/inspect.go +++ b/internal/rdb/inspect.go @@ -51,56 +51,6 @@ type DailyStats struct { Time time.Time } -// EnqueuedTask is a task in a queue and is ready to be processed. 
-type EnqueuedTask struct { - ID uuid.UUID - Type string - Payload map[string]interface{} - Queue string -} - -// InProgressTask is a task that's currently being processed. -type InProgressTask struct { - ID uuid.UUID - Type string - Payload map[string]interface{} -} - -// ScheduledTask is a task that's scheduled to be processed in the future. -type ScheduledTask struct { - ID uuid.UUID - Type string - Payload map[string]interface{} - ProcessAt time.Time - Score int64 - Queue string -} - -// RetryTask is a task that's in retry queue because worker failed to process the task. -type RetryTask struct { - ID uuid.UUID - Type string - Payload map[string]interface{} - // TODO(hibiken): add LastFailedAt time.Time - ProcessAt time.Time - ErrorMsg string - Retried int - Retry int - Score int64 - Queue string -} - -// DeadTask is a task in that has exhausted all retries. -type DeadTask struct { - ID uuid.UUID - Type string - Payload map[string]interface{} - LastFailedAt time.Time - ErrorMsg string - Score int64 - Queue string -} - // KEYS[1] -> asynq:queues // KEYS[2] -> asynq:in_progress // KEYS[3] -> asynq:scheduled @@ -289,158 +239,79 @@ func (p Pagination) stop() int64 { } // ListEnqueued returns enqueued tasks that are ready to be processed. -func (r *RDB) ListEnqueued(qname string, pgn Pagination) ([]*EnqueuedTask, error) { +func (r *RDB) ListEnqueued(qname string, pgn Pagination) ([]*base.TaskMessage, error) { qkey := base.QueueKey(qname) if !r.client.SIsMember(base.AllQueues, qkey).Val() { return nil, fmt.Errorf("queue %q does not exist", qname) } - // Note: Because we use LPUSH to redis list, we need to calculate the - // correct range and reverse the list to get the tasks with pagination. 
- stop := -pgn.start() - 1 - start := -pgn.stop() - 1 - data, err := r.client.LRange(qkey, start, stop).Result() - if err != nil { - return nil, err - } - reverse(data) - var tasks []*EnqueuedTask - for _, s := range data { - var msg base.TaskMessage - err := json.Unmarshal([]byte(s), &msg) - if err != nil { - continue // bad data, ignore and continue - } - tasks = append(tasks, &EnqueuedTask{ - ID: msg.ID, - Type: msg.Type, - Payload: msg.Payload, - Queue: msg.Queue, - }) - } - return tasks, nil + return r.listMessages(qkey, pgn) } // ListInProgress returns all tasks that are currently being processed. -func (r *RDB) ListInProgress(pgn Pagination) ([]*InProgressTask, error) { +func (r *RDB) ListInProgress(pgn Pagination) ([]*base.TaskMessage, error) { + return r.listMessages(base.InProgressQueue, pgn) +} + +// listMessages returns a list of TaskMessage in Redis list with the given key. +func (r *RDB) listMessages(key string, pgn Pagination) ([]*base.TaskMessage, error) { // Note: Because we use LPUSH to redis list, we need to calculate the // correct range and reverse the list to get the tasks with pagination. stop := -pgn.start() - 1 start := -pgn.stop() - 1 - data, err := r.client.LRange(base.InProgressQueue, start, stop).Result() + data, err := r.client.LRange(key, start, stop).Result() if err != nil { return nil, err } reverse(data) - var tasks []*InProgressTask + var msgs []*base.TaskMessage for _, s := range data { - var msg base.TaskMessage - err := json.Unmarshal([]byte(s), &msg) + m, err := base.DecodeMessage(s) if err != nil { continue // bad data, ignore and continue } - tasks = append(tasks, &InProgressTask{ - ID: msg.ID, - Type: msg.Type, - Payload: msg.Payload, - }) + msgs = append(msgs, m) } - return tasks, nil + return msgs, nil + } // ListScheduled returns all tasks that are scheduled to be processed // in the future. 
-func (r *RDB) ListScheduled(pgn Pagination) ([]*ScheduledTask, error) { - data, err := r.client.ZRangeWithScores(base.ScheduledQueue, pgn.start(), pgn.stop()).Result() - if err != nil { - return nil, err - } - var tasks []*ScheduledTask - for _, z := range data { - s, ok := z.Member.(string) - if !ok { - continue // bad data, ignore and continue - } - var msg base.TaskMessage - err := json.Unmarshal([]byte(s), &msg) - if err != nil { - continue // bad data, ignore and continue - } - processAt := time.Unix(int64(z.Score), 0) - tasks = append(tasks, &ScheduledTask{ - ID: msg.ID, - Type: msg.Type, - Payload: msg.Payload, - Queue: msg.Queue, - ProcessAt: processAt, - Score: int64(z.Score), - }) - } - return tasks, nil +func (r *RDB) ListScheduled(pgn Pagination) ([]base.Z, error) { + return r.listZSetEntries(base.ScheduledQueue, pgn) } // ListRetry returns all tasks that have failed before and willl be retried // in the future. -func (r *RDB) ListRetry(pgn Pagination) ([]*RetryTask, error) { - data, err := r.client.ZRangeWithScores(base.RetryQueue, pgn.start(), pgn.stop()).Result() - if err != nil { - return nil, err - } - var tasks []*RetryTask - for _, z := range data { - s, ok := z.Member.(string) - if !ok { - continue // bad data, ignore and continue - } - var msg base.TaskMessage - err := json.Unmarshal([]byte(s), &msg) - if err != nil { - continue // bad data, ignore and continue - } - processAt := time.Unix(int64(z.Score), 0) - tasks = append(tasks, &RetryTask{ - ID: msg.ID, - Type: msg.Type, - Payload: msg.Payload, - ErrorMsg: msg.ErrorMsg, - Retry: msg.Retry, - Retried: msg.Retried, - Queue: msg.Queue, - ProcessAt: processAt, - Score: int64(z.Score), - }) - } - return tasks, nil +func (r *RDB) ListRetry(pgn Pagination) ([]base.Z, error) { + return r.listZSetEntries(base.RetryQueue, pgn) } // ListDead returns all tasks that have exhausted its retry limit. 
-func (r *RDB) ListDead(pgn Pagination) ([]*DeadTask, error) {
-	data, err := r.client.ZRangeWithScores(base.DeadQueue, pgn.start(), pgn.stop()).Result()
+func (r *RDB) ListDead(pgn Pagination) ([]base.Z, error) {
+	return r.listZSetEntries(base.DeadQueue, pgn)
+}
+
+// listZSetEntries returns a list of message and score pairs in Redis sorted-set
+// with the given key.
+func (r *RDB) listZSetEntries(key string, pgn Pagination) ([]base.Z, error) {
+	data, err := r.client.ZRangeWithScores(key, pgn.start(), pgn.stop()).Result()
 	if err != nil {
 		return nil, err
 	}
-	var tasks []*DeadTask
+	var res []base.Z
 	for _, z := range data {
 		s, ok := z.Member.(string)
 		if !ok {
 			continue // bad data, ignore and continue
 		}
-		var msg base.TaskMessage
-		err := json.Unmarshal([]byte(s), &msg)
+		msg, err := base.DecodeMessage(s)
 		if err != nil {
 			continue // bad data, ignore and continue
 		}
-		lastFailedAt := time.Unix(int64(z.Score), 0)
-		tasks = append(tasks, &DeadTask{
-			ID:           msg.ID,
-			Type:         msg.Type,
-			Payload:      msg.Payload,
-			ErrorMsg:     msg.ErrorMsg,
-			Queue:        msg.Queue,
-			LastFailedAt: lastFailedAt,
-			Score:        int64(z.Score),
-		})
+		res = append(res, base.Z{Message: msg, Score: int64(z.Score)})
 	}
-	return tasks, nil
+	return res, nil
 }
 
 // EnqueueDeadTask finds a task that matches the given id and score from dead queue
@@ -704,19 +575,40 @@ func (r *RDB) deleteTask(zset, id string, score float64) error {
 	return nil
 }
 
-// DeleteAllDeadTasks deletes all tasks from the dead queue.
-func (r *RDB) DeleteAllDeadTasks() error {
-	return r.client.Del(base.DeadQueue).Err()
+// KEYS[1] -> queue to delete
+var deleteAllCmd = redis.NewScript(`
+local n = redis.call("ZCARD", KEYS[1])
+redis.call("DEL", KEYS[1])
+return n`)
+
+// DeleteAllDeadTasks deletes all tasks from the dead queue
+// and returns the number of tasks deleted.
+func (r *RDB) DeleteAllDeadTasks() (int64, error) {
+	return r.deleteAll(base.DeadQueue)
}

-// DeleteAllRetryTasks deletes all tasks from the dead queue.
-func (r *RDB) DeleteAllRetryTasks() error {
-	return r.client.Del(base.RetryQueue).Err()
+// DeleteAllRetryTasks deletes all tasks from the retry queue
+// and returns the number of tasks deleted.
+func (r *RDB) DeleteAllRetryTasks() (int64, error) {
+	return r.deleteAll(base.RetryQueue)
 }
 
-// DeleteAllScheduledTasks deletes all tasks from the dead queue.
-func (r *RDB) DeleteAllScheduledTasks() error {
-	return r.client.Del(base.ScheduledQueue).Err()
+// DeleteAllScheduledTasks deletes all tasks from the scheduled queue
+// and returns the number of tasks deleted.
+func (r *RDB) DeleteAllScheduledTasks() (int64, error) {
+	return r.deleteAll(base.ScheduledQueue)
+}
+
+func (r *RDB) deleteAll(key string) (int64, error) {
+	res, err := deleteAllCmd.Run(r.client, []string{key}).Result()
+	if err != nil {
+		return 0, err
+	}
+	n, ok := res.(int64)
+	if !ok {
+		return 0, fmt.Errorf("could not cast %v to int64", res)
+	}
+	return n, nil
 }
 
 // ErrQueueNotFound indicates specified queue does not exist.
diff --git a/internal/rdb/inspect_test.go b/internal/rdb/inspect_test.go index 1b451b1..dbb50e3 100644 --- a/internal/rdb/inspect_test.go +++ b/internal/rdb/inspect_test.go @@ -6,7 +6,6 @@ package rdb import ( "fmt" - "sort" "testing" "time" @@ -32,9 +31,9 @@ func TestCurrentStats(t *testing.T) { tests := []struct { enqueued map[string][]*base.TaskMessage inProgress []*base.TaskMessage - scheduled []h.ZSetEntry - retry []h.ZSetEntry - dead []h.ZSetEntry + scheduled []base.Z + retry []base.Z + dead []base.Z processed int failed int allQueues []interface{} @@ -48,11 +47,11 @@ func TestCurrentStats(t *testing.T) { "low": {m6}, }, inProgress: []*base.TaskMessage{m2}, - scheduled: []h.ZSetEntry{ - {Msg: m3, Score: float64(now.Add(time.Hour).Unix())}, - {Msg: m4, Score: float64(now.Unix())}}, - retry: []h.ZSetEntry{}, - dead: []h.ZSetEntry{}, + scheduled: []base.Z{ + {Message: m3, Score: now.Add(time.Hour).Unix()}, + {Message: m4, Score: now.Unix()}}, + retry: []base.Z{}, + dead: []base.Z{}, processed: 120, failed: 2, allQueues: []interface{}{base.DefaultQueue, base.QueueKey("critical"), base.QueueKey("low")}, @@ -79,13 +78,13 @@ func TestCurrentStats(t *testing.T) { base.DefaultQueueName: {}, }, inProgress: []*base.TaskMessage{}, - scheduled: []h.ZSetEntry{ - {Msg: m3, Score: float64(now.Unix())}, - {Msg: m4, Score: float64(now.Unix())}}, - retry: []h.ZSetEntry{ - {Msg: m1, Score: float64(now.Add(time.Minute).Unix())}}, - dead: []h.ZSetEntry{ - {Msg: m2, Score: float64(now.Add(-time.Hour).Unix())}}, + scheduled: []base.Z{ + {Message: m3, Score: now.Unix()}, + {Message: m4, Score: now.Unix()}}, + retry: []base.Z{ + {Message: m1, Score: now.Add(time.Minute).Unix()}}, + dead: []base.Z{ + {Message: m2, Score: now.Add(-time.Hour).Unix()}}, processed: 90, failed: 10, allQueues: []interface{}{base.DefaultQueue}, @@ -111,11 +110,11 @@ func TestCurrentStats(t *testing.T) { "low": {m6}, }, inProgress: []*base.TaskMessage{m2}, - scheduled: []h.ZSetEntry{ - {Msg: m3, Score: 
float64(now.Add(time.Hour).Unix())}, - {Msg: m4, Score: float64(now.Unix())}}, - retry: []h.ZSetEntry{}, - dead: []h.ZSetEntry{}, + scheduled: []base.Z{ + {Message: m3, Score: now.Add(time.Hour).Unix()}, + {Message: m4, Score: now.Unix()}}, + retry: []base.Z{}, + dead: []base.Z{}, processed: 120, failed: 2, allQueues: []interface{}{base.DefaultQueue, base.QueueKey("critical"), base.QueueKey("low")}, @@ -276,27 +275,25 @@ func TestListEnqueued(t *testing.T) { m2 := h.NewTaskMessage("reindex", nil) m3 := h.NewTaskMessageWithQueue("important_notification", nil, "critical") m4 := h.NewTaskMessageWithQueue("minor_notification", nil, "low") - t1 := &EnqueuedTask{ID: m1.ID, Type: m1.Type, Payload: m1.Payload, Queue: m1.Queue} - t2 := &EnqueuedTask{ID: m2.ID, Type: m2.Type, Payload: m2.Payload, Queue: m2.Queue} - t3 := &EnqueuedTask{ID: m3.ID, Type: m3.Type, Payload: m3.Payload, Queue: m3.Queue} + tests := []struct { enqueued map[string][]*base.TaskMessage qname string - want []*EnqueuedTask + want []*base.TaskMessage }{ { enqueued: map[string][]*base.TaskMessage{ base.DefaultQueueName: {m1, m2}, }, qname: base.DefaultQueueName, - want: []*EnqueuedTask{t1, t2}, + want: []*base.TaskMessage{m1, m2}, }, { enqueued: map[string][]*base.TaskMessage{ - base.DefaultQueueName: {}, + base.DefaultQueueName: nil, }, qname: base.DefaultQueueName, - want: []*EnqueuedTask{}, + want: []*base.TaskMessage(nil), }, { enqueued: map[string][]*base.TaskMessage{ @@ -305,7 +302,7 @@ func TestListEnqueued(t *testing.T) { "low": {m4}, }, qname: base.DefaultQueueName, - want: []*EnqueuedTask{t1, t2}, + want: []*base.TaskMessage{m1, m2}, }, { enqueued: map[string][]*base.TaskMessage{ @@ -314,7 +311,7 @@ func TestListEnqueued(t *testing.T) { "low": {m4}, }, qname: "critical", - want: []*EnqueuedTask{t3}, + want: []*base.TaskMessage{m3}, }, } @@ -330,19 +327,13 @@ func TestListEnqueued(t *testing.T) { t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want) continue } - sortOpt := 
cmp.Transformer("SortMsg", func(in []*EnqueuedTask) []*EnqueuedTask { - out := append([]*EnqueuedTask(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() < out[j].ID.String() - }) - return out - }) - if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" { + if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff) continue } } } + func TestListEnqueuedPagination(t *testing.T) { r := setup(t) var msgs []*base.TaskMessage @@ -412,22 +403,14 @@ func TestListEnqueuedPagination(t *testing.T) { func TestListInProgress(t *testing.T) { r := setup(t) - m1 := h.NewTaskMessage("send_email", map[string]interface{}{"subject": "hello"}) - m2 := h.NewTaskMessage("reindex", nil) - t1 := &InProgressTask{ID: m1.ID, Type: m1.Type, Payload: m1.Payload} - t2 := &InProgressTask{ID: m2.ID, Type: m2.Type, Payload: m2.Payload} + m1 := h.NewTaskMessage("task1", nil) + m2 := h.NewTaskMessage("task2", nil) + tests := []struct { inProgress []*base.TaskMessage - want []*InProgressTask }{ - { - inProgress: []*base.TaskMessage{m1, m2}, - want: []*InProgressTask{t1, t2}, - }, - { - inProgress: []*base.TaskMessage{}, - want: []*InProgressTask{}, - }, + {inProgress: []*base.TaskMessage{m1, m2}}, + {inProgress: []*base.TaskMessage(nil)}, } for _, tc := range tests { @@ -437,18 +420,11 @@ func TestListInProgress(t *testing.T) { got, err := r.ListInProgress(Pagination{Size: 20, Page: 0}) op := "r.ListInProgress(Pagination{Size: 20, Page: 0})" if err != nil { - t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want) + t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.inProgress) continue } - sortOpt := cmp.Transformer("SortMsg", func(in []*InProgressTask) []*InProgressTask { - out := append([]*InProgressTask(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() < out[j].ID.String() - }) - return out - }) - if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" { - t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff) + if diff := cmp.Diff(tc.inProgress, got); diff != "" { + t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.inProgress, diff) continue } } @@ -511,27 +487,33 @@ func TestListInProgressPagination(t *testing.T) { func TestListScheduled(t *testing.T) { r := setup(t) - m1 := h.NewTaskMessage("send_email", map[string]interface{}{"subject": "hello"}) - m2 := h.NewTaskMessage("reindex", nil) + m1 := h.NewTaskMessage("task1", nil) + m2 := h.NewTaskMessage("task2", nil) + m3 := h.NewTaskMessage("task3", nil) p1 := time.Now().Add(30 * time.Minute) p2 := time.Now().Add(24 * time.Hour) - t1 := &ScheduledTask{ID: m1.ID, Type: m1.Type, Payload: m1.Payload, ProcessAt: p1, Score: p1.Unix(), Queue: m1.Queue} - t2 := &ScheduledTask{ID: m2.ID, Type: m2.Type, Payload: m2.Payload, ProcessAt: p2, Score: p2.Unix(), Queue: m2.Queue} + p3 := time.Now().Add(5 * time.Minute) tests := []struct { - scheduled []h.ZSetEntry - want []*ScheduledTask + scheduled []base.Z + want []base.Z }{ { - scheduled: []h.ZSetEntry{ - {Msg: m1, Score: float64(p1.Unix())}, - {Msg: m2, Score: float64(p2.Unix())}, + scheduled: []base.Z{ + {Message: m1, Score: p1.Unix()}, + {Message: m2, Score: p2.Unix()}, + {Message: m3, Score: p3.Unix()}, + }, + // should be sorted by score in ascending order + want: []base.Z{ + {Message: m3, Score: p3.Unix()}, + {Message: m1, Score: p1.Unix()}, + {Message: m2, Score: p2.Unix()}, }, - want: []*ScheduledTask{t1, t2}, }, { - scheduled: []h.ZSetEntry{}, - want: []*ScheduledTask{}, + scheduled: []base.Z(nil), + want: []base.Z(nil), }, } @@ -545,14 +527,7 @@ func TestListScheduled(t *testing.T) { t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want) continue } - 
sortOpt := cmp.Transformer("SortMsg", func(in []*ScheduledTask) []*ScheduledTask { - out := append([]*ScheduledTask(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() < out[j].ID.String() - }) - return out - }) - if diff := cmp.Diff(tc.want, got, sortOpt, timeCmpOpt); diff != "" { + if diff := cmp.Diff(tc.want, got, timeCmpOpt); diff != "" { t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff) continue } @@ -601,13 +576,13 @@ func TestListScheduledPagination(t *testing.T) { continue } - first := got[0] + first := got[0].Message if first.Type != tc.wantFirst { t.Errorf("%s; %s returned a list with first message %q, want %q", tc.desc, op, first.Type, tc.wantFirst) } - last := got[len(got)-1] + last := got[len(got)-1].Message if last.Type != tc.wantLast { t.Errorf("%s; %s returned a list with the last message %q, want %q", tc.desc, op, last.Type, tc.wantLast) @@ -637,43 +612,24 @@ func TestListRetry(t *testing.T) { } p1 := time.Now().Add(5 * time.Minute) p2 := time.Now().Add(24 * time.Hour) - t1 := &RetryTask{ - ID: m1.ID, - Type: m1.Type, - Payload: m1.Payload, - ProcessAt: p1, - ErrorMsg: m1.ErrorMsg, - Retried: m1.Retried, - Retry: m1.Retry, - Score: p1.Unix(), - Queue: m1.Queue, - } - t2 := &RetryTask{ - ID: m2.ID, - Type: m2.Type, - Payload: m2.Payload, - ProcessAt: p2, - ErrorMsg: m2.ErrorMsg, - Retried: m2.Retried, - Retry: m2.Retry, - Score: p2.Unix(), - Queue: m1.Queue, - } tests := []struct { - retry []h.ZSetEntry - want []*RetryTask + retry []base.Z + want []base.Z }{ { - retry: []h.ZSetEntry{ - {Msg: m1, Score: float64(p1.Unix())}, - {Msg: m2, Score: float64(p2.Unix())}, + retry: []base.Z{ + {Message: m1, Score: p1.Unix()}, + {Message: m2, Score: p2.Unix()}, + }, + want: []base.Z{ + {Message: m1, Score: p1.Unix()}, + {Message: m2, Score: p2.Unix()}, }, - want: []*RetryTask{t1, t2}, }, { - retry: []h.ZSetEntry{}, - want: []*RetryTask{}, + retry: []base.Z(nil), 
+ want: []base.Z(nil), }, } @@ -687,16 +643,9 @@ func TestListRetry(t *testing.T) { t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want) continue } - sortOpt := cmp.Transformer("SortMsg", func(in []*RetryTask) []*RetryTask { - out := append([]*RetryTask(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() < out[j].ID.String() - }) - return out - }) - - if diff := cmp.Diff(tc.want, got, sortOpt, timeCmpOpt); diff != "" { - t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff) + if diff := cmp.Diff(tc.want, got, timeCmpOpt); diff != "" { + t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", + op, got, err, tc.want, diff) continue } } @@ -706,11 +655,11 @@ func TestListRetryPagination(t *testing.T) { r := setup(t) // create 100 tasks with an increasing number of wait time. now := time.Now() - var seed []h.ZSetEntry + var seed []base.Z for i := 0; i < 100; i++ { msg := h.NewTaskMessage(fmt.Sprintf("task %d", i), nil) processAt := now.Add(time.Duration(i) * time.Second) - seed = append(seed, h.ZSetEntry{Msg: msg, Score: float64(processAt.Unix())}) + seed = append(seed, base.Z{Message: msg, Score: processAt.Unix()}) } h.SeedRetryQueue(t, r.client, seed) @@ -731,14 +680,16 @@ func TestListRetryPagination(t *testing.T) { for _, tc := range tests { got, err := r.ListRetry(Pagination{Size: tc.size, Page: tc.page}) - op := fmt.Sprintf("r.ListRetry(Pagination{Size: %d, Page: %d})", tc.size, tc.page) + op := fmt.Sprintf("r.ListRetry(Pagination{Size: %d, Page: %d})", + tc.size, tc.page) if err != nil { t.Errorf("%s; %s returned error %v", tc.desc, op, err) continue } if len(got) != tc.wantSize { - t.Errorf("%s; %s returned list of size %d, want %d", tc.desc, op, len(got), tc.wantSize) + t.Errorf("%s; %s returned list of size %d, want %d", + tc.desc, op, len(got), tc.wantSize) continue } @@ -746,13 +697,13 @@ func TestListRetryPagination(t *testing.T) { continue } - first 
:= got[0] + first := got[0].Message if first.Type != tc.wantFirst { t.Errorf("%s; %s returned a list with first message %q, want %q", tc.desc, op, first.Type, tc.wantFirst) } - last := got[len(got)-1] + last := got[len(got)-1].Message if last.Type != tc.wantLast { t.Errorf("%s; %s returned a list with the last message %q, want %q", tc.desc, op, last.Type, tc.wantLast) @@ -778,39 +729,24 @@ func TestListDead(t *testing.T) { } f1 := time.Now().Add(-5 * time.Minute) f2 := time.Now().Add(-24 * time.Hour) - t1 := &DeadTask{ - ID: m1.ID, - Type: m1.Type, - Payload: m1.Payload, - LastFailedAt: f1, - ErrorMsg: m1.ErrorMsg, - Score: f1.Unix(), - Queue: m1.Queue, - } - t2 := &DeadTask{ - ID: m2.ID, - Type: m2.Type, - Payload: m2.Payload, - LastFailedAt: f2, - ErrorMsg: m2.ErrorMsg, - Score: f2.Unix(), - Queue: m2.Queue, - } tests := []struct { - dead []h.ZSetEntry - want []*DeadTask + dead []base.Z + want []base.Z }{ { - dead: []h.ZSetEntry{ - {Msg: m1, Score: float64(f1.Unix())}, - {Msg: m2, Score: float64(f2.Unix())}, + dead: []base.Z{ + {Message: m1, Score: f1.Unix()}, + {Message: m2, Score: f2.Unix()}, + }, + want: []base.Z{ + {Message: m2, Score: f2.Unix()}, // FIXME: shouldn't be sorted in the other order? + {Message: m1, Score: f1.Unix()}, }, - want: []*DeadTask{t1, t2}, }, { - dead: []h.ZSetEntry{}, - want: []*DeadTask{}, + dead: []base.Z(nil), + want: []base.Z(nil), }, } @@ -824,15 +760,9 @@ func TestListDead(t *testing.T) { t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want) continue } - sortOpt := cmp.Transformer("SortMsg", func(in []*DeadTask) []*DeadTask { - out := append([]*DeadTask(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() < out[j].ID.String() - }) - return out - }) - if diff := cmp.Diff(tc.want, got, sortOpt, timeCmpOpt); diff != "" { - t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", op, got, err, tc.want, diff) + if diff := cmp.Diff(tc.want, got, timeCmpOpt); diff != "" { + t.Errorf("%s = %v, %v, want %v, nil; (-want, +got)\n%s", + op, got, err, tc.want, diff) continue } } @@ -840,10 +770,10 @@ func TestListDead(t *testing.T) { func TestListDeadPagination(t *testing.T) { r := setup(t) - var entries []h.ZSetEntry + var entries []base.Z for i := 0; i < 100; i++ { msg := h.NewTaskMessage(fmt.Sprintf("task %d", i), nil) - entries = append(entries, h.ZSetEntry{Msg: msg, Score: float64(i)}) + entries = append(entries, base.Z{Message: msg, Score: int64(i)}) } h.SeedDeadQueue(t, r.client, entries) @@ -864,14 +794,16 @@ func TestListDeadPagination(t *testing.T) { for _, tc := range tests { got, err := r.ListDead(Pagination{Size: tc.size, Page: tc.page}) - op := fmt.Sprintf("r.ListDead(Pagination{Size: %d, Page: %d})", tc.size, tc.page) + op := fmt.Sprintf("r.ListDead(Pagination{Size: %d, Page: %d})", + tc.size, tc.page) if err != nil { t.Errorf("%s; %s returned error %v", tc.desc, op, err) continue } if len(got) != tc.wantSize { - t.Errorf("%s; %s returned list of size %d, want %d", tc.desc, op, len(got), tc.wantSize) + t.Errorf("%s; %s returned list of size %d, want %d", + tc.desc, op, len(got), tc.wantSize) continue } @@ -879,13 +811,13 @@ func TestListDeadPagination(t *testing.T) { continue } - first := got[0] + first := got[0].Message if first.Type != tc.wantFirst { t.Errorf("%s; %s returned a list with first message %q, want %q", tc.desc, op, first.Type, tc.wantFirst) } - last := got[len(got)-1] + last := got[len(got)-1].Message if last.Type != tc.wantLast { t.Errorf("%s; %s returned a list with the last message %q, want %q", tc.desc, op, last.Type, tc.wantLast) @@ -905,7 
+837,7 @@ func TestEnqueueDeadTask(t *testing.T) { s2 := time.Now().Add(-time.Hour).Unix() tests := []struct { - dead []h.ZSetEntry + dead []base.Z score int64 id uuid.UUID want error // expected return value from calling EnqueueDeadTask @@ -913,9 +845,9 @@ func TestEnqueueDeadTask(t *testing.T) { wantEnqueued map[string][]*base.TaskMessage }{ { - dead: []h.ZSetEntry{ - {Msg: t1, Score: float64(s1)}, - {Msg: t2, Score: float64(s2)}, + dead: []base.Z{ + {Message: t1, Score: s1}, + {Message: t2, Score: s2}, }, score: s2, id: t2.ID, @@ -926,9 +858,9 @@ func TestEnqueueDeadTask(t *testing.T) { }, }, { - dead: []h.ZSetEntry{ - {Msg: t1, Score: float64(s1)}, - {Msg: t2, Score: float64(s2)}, + dead: []base.Z{ + {Message: t1, Score: s1}, + {Message: t2, Score: s2}, }, score: 123, id: t2.ID, @@ -939,10 +871,10 @@ func TestEnqueueDeadTask(t *testing.T) { }, }, { - dead: []h.ZSetEntry{ - {Msg: t1, Score: float64(s1)}, - {Msg: t2, Score: float64(s2)}, - {Msg: t3, Score: float64(s1)}, + dead: []base.Z{ + {Message: t1, Score: s1}, + {Message: t2, Score: s2}, + {Message: t3, Score: s1}, }, score: s1, id: t3.ID, @@ -989,7 +921,7 @@ func TestEnqueueRetryTask(t *testing.T) { s1 := time.Now().Add(-5 * time.Minute).Unix() s2 := time.Now().Add(-time.Hour).Unix() tests := []struct { - retry []h.ZSetEntry + retry []base.Z score int64 id uuid.UUID want error // expected return value from calling EnqueueRetryTask @@ -997,9 +929,9 @@ func TestEnqueueRetryTask(t *testing.T) { wantEnqueued map[string][]*base.TaskMessage }{ { - retry: []h.ZSetEntry{ - {Msg: t1, Score: float64(s1)}, - {Msg: t2, Score: float64(s2)}, + retry: []base.Z{ + {Message: t1, Score: s1}, + {Message: t2, Score: s2}, }, score: s2, id: t2.ID, @@ -1010,9 +942,9 @@ func TestEnqueueRetryTask(t *testing.T) { }, }, { - retry: []h.ZSetEntry{ - {Msg: t1, Score: float64(s1)}, - {Msg: t2, Score: float64(s2)}, + retry: []base.Z{ + {Message: t1, Score: s1}, + {Message: t2, Score: s2}, }, score: 123, id: t2.ID, @@ -1023,10 +955,10 @@ 
func TestEnqueueRetryTask(t *testing.T) { }, }, { - retry: []h.ZSetEntry{ - {Msg: t1, Score: float64(s1)}, - {Msg: t2, Score: float64(s2)}, - {Msg: t3, Score: float64(s2)}, + retry: []base.Z{ + {Message: t1, Score: s1}, + {Message: t2, Score: s2}, + {Message: t3, Score: s2}, }, score: s2, id: t3.ID, @@ -1073,7 +1005,7 @@ func TestEnqueueScheduledTask(t *testing.T) { s2 := time.Now().Add(-time.Hour).Unix() tests := []struct { - scheduled []h.ZSetEntry + scheduled []base.Z score int64 id uuid.UUID want error // expected return value from calling EnqueueScheduledTask @@ -1081,9 +1013,9 @@ func TestEnqueueScheduledTask(t *testing.T) { wantEnqueued map[string][]*base.TaskMessage }{ { - scheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(s1)}, - {Msg: t2, Score: float64(s2)}, + scheduled: []base.Z{ + {Message: t1, Score: s1}, + {Message: t2, Score: s2}, }, score: s2, id: t2.ID, @@ -1094,9 +1026,9 @@ func TestEnqueueScheduledTask(t *testing.T) { }, }, { - scheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(s1)}, - {Msg: t2, Score: float64(s2)}, + scheduled: []base.Z{ + {Message: t1, Score: s1}, + {Message: t2, Score: s2}, }, score: 123, id: t2.ID, @@ -1107,10 +1039,10 @@ func TestEnqueueScheduledTask(t *testing.T) { }, }, { - scheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(s1)}, - {Msg: t2, Score: float64(s2)}, - {Msg: t3, Score: float64(s1)}, + scheduled: []base.Z{ + {Message: t1, Score: s1}, + {Message: t2, Score: s2}, + {Message: t3, Score: s1}, }, score: s1, id: t3.ID, @@ -1159,16 +1091,16 @@ func TestEnqueueAllScheduledTasks(t *testing.T) { tests := []struct { desc string - scheduled []h.ZSetEntry + scheduled []base.Z want int64 wantEnqueued map[string][]*base.TaskMessage }{ { desc: "with tasks in scheduled queue", - scheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t2, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t3, Score: float64(time.Now().Add(time.Hour).Unix())}, + scheduled: []base.Z{ + 
{Message: t1, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t2, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t3, Score: time.Now().Add(time.Hour).Unix()}, }, want: 3, wantEnqueued: map[string][]*base.TaskMessage{ @@ -1177,7 +1109,7 @@ func TestEnqueueAllScheduledTasks(t *testing.T) { }, { desc: "with empty scheduled queue", - scheduled: []h.ZSetEntry{}, + scheduled: []base.Z{}, want: 0, wantEnqueued: map[string][]*base.TaskMessage{ base.DefaultQueueName: {}, @@ -1185,12 +1117,12 @@ func TestEnqueueAllScheduledTasks(t *testing.T) { }, { desc: "with custom queues", - scheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t2, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t3, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t4, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t5, Score: float64(time.Now().Add(time.Hour).Unix())}, + scheduled: []base.Z{ + {Message: t1, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t2, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t3, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t4, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t5, Score: time.Now().Add(time.Hour).Unix()}, }, want: 5, wantEnqueued: map[string][]*base.TaskMessage{ @@ -1238,16 +1170,16 @@ func TestEnqueueAllRetryTasks(t *testing.T) { tests := []struct { desc string - retry []h.ZSetEntry + retry []base.Z want int64 wantEnqueued map[string][]*base.TaskMessage }{ { desc: "with tasks in retry queue", - retry: []h.ZSetEntry{ - {Msg: t1, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t2, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t3, Score: float64(time.Now().Add(time.Hour).Unix())}, + retry: []base.Z{ + {Message: t1, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t2, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t3, Score: time.Now().Add(time.Hour).Unix()}, }, want: 3, wantEnqueued: 
map[string][]*base.TaskMessage{ @@ -1256,7 +1188,7 @@ func TestEnqueueAllRetryTasks(t *testing.T) { }, { desc: "with empty retry queue", - retry: []h.ZSetEntry{}, + retry: []base.Z{}, want: 0, wantEnqueued: map[string][]*base.TaskMessage{ base.DefaultQueueName: {}, @@ -1264,12 +1196,12 @@ func TestEnqueueAllRetryTasks(t *testing.T) { }, { desc: "with custom queues", - retry: []h.ZSetEntry{ - {Msg: t1, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t2, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t3, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t4, Score: float64(time.Now().Add(time.Hour).Unix())}, - {Msg: t5, Score: float64(time.Now().Add(time.Hour).Unix())}, + retry: []base.Z{ + {Message: t1, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t2, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t3, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t4, Score: time.Now().Add(time.Hour).Unix()}, + {Message: t5, Score: time.Now().Add(time.Hour).Unix()}, }, want: 5, wantEnqueued: map[string][]*base.TaskMessage{ @@ -1317,16 +1249,16 @@ func TestEnqueueAllDeadTasks(t *testing.T) { tests := []struct { desc string - dead []h.ZSetEntry + dead []base.Z want int64 wantEnqueued map[string][]*base.TaskMessage }{ { desc: "with tasks in dead queue", - dead: []h.ZSetEntry{ - {Msg: t1, Score: float64(time.Now().Add(-time.Minute).Unix())}, - {Msg: t2, Score: float64(time.Now().Add(-time.Minute).Unix())}, - {Msg: t3, Score: float64(time.Now().Add(-time.Minute).Unix())}, + dead: []base.Z{ + {Message: t1, Score: time.Now().Add(-time.Minute).Unix()}, + {Message: t2, Score: time.Now().Add(-time.Minute).Unix()}, + {Message: t3, Score: time.Now().Add(-time.Minute).Unix()}, }, want: 3, wantEnqueued: map[string][]*base.TaskMessage{ @@ -1335,7 +1267,7 @@ func TestEnqueueAllDeadTasks(t *testing.T) { }, { desc: "with empty dead queue", - dead: []h.ZSetEntry{}, + dead: []base.Z{}, want: 0, wantEnqueued: map[string][]*base.TaskMessage{ 
base.DefaultQueueName: {}, @@ -1343,12 +1275,12 @@ func TestEnqueueAllDeadTasks(t *testing.T) { }, { desc: "with custom queues", - dead: []h.ZSetEntry{ - {Msg: t1, Score: float64(time.Now().Add(-time.Minute).Unix())}, - {Msg: t2, Score: float64(time.Now().Add(-time.Minute).Unix())}, - {Msg: t3, Score: float64(time.Now().Add(-time.Minute).Unix())}, - {Msg: t4, Score: float64(time.Now().Add(-time.Minute).Unix())}, - {Msg: t5, Score: float64(time.Now().Add(-time.Minute).Unix())}, + dead: []base.Z{ + {Message: t1, Score: time.Now().Add(-time.Minute).Unix()}, + {Message: t2, Score: time.Now().Add(-time.Minute).Unix()}, + {Message: t3, Score: time.Now().Add(-time.Minute).Unix()}, + {Message: t4, Score: time.Now().Add(-time.Minute).Unix()}, + {Message: t5, Score: time.Now().Add(-time.Minute).Unix()}, }, want: 5, wantEnqueued: map[string][]*base.TaskMessage{ @@ -1392,45 +1324,45 @@ func TestKillRetryTask(t *testing.T) { t2 := time.Now().Add(time.Hour) tests := []struct { - retry []h.ZSetEntry - dead []h.ZSetEntry + retry []base.Z + dead []base.Z id uuid.UUID score int64 want error - wantRetry []h.ZSetEntry - wantDead []h.ZSetEntry + wantRetry []base.Z + wantDead []base.Z }{ { - retry: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + retry: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, - dead: []h.ZSetEntry{}, + dead: []base.Z{}, id: m1.ID, score: t1.Unix(), want: nil, - wantRetry: []h.ZSetEntry{ - {Msg: m2, Score: float64(t2.Unix())}, + wantRetry: []base.Z{ + {Message: m2, Score: t2.Unix()}, }, - wantDead: []h.ZSetEntry{ - {Msg: m1, Score: float64(time.Now().Unix())}, + wantDead: []base.Z{ + {Message: m1, Score: time.Now().Unix()}, }, }, { - retry: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, + retry: []base.Z{ + {Message: m1, Score: t1.Unix()}, }, - dead: []h.ZSetEntry{ - {Msg: m2, Score: float64(t2.Unix())}, + dead: []base.Z{ + {Message: m2, Score: t2.Unix()}, }, id: m2.ID, 
score: t2.Unix(), want: ErrTaskNotFound, - wantRetry: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, + wantRetry: []base.Z{ + {Message: m1, Score: t1.Unix()}, }, - wantDead: []h.ZSetEntry{ - {Msg: m2, Score: float64(t2.Unix())}, + wantDead: []base.Z{ + {Message: m2, Score: t2.Unix()}, }, }, } @@ -1469,45 +1401,45 @@ func TestKillScheduledTask(t *testing.T) { t2 := time.Now().Add(time.Hour) tests := []struct { - scheduled []h.ZSetEntry - dead []h.ZSetEntry + scheduled []base.Z + dead []base.Z id uuid.UUID score int64 want error - wantScheduled []h.ZSetEntry - wantDead []h.ZSetEntry + wantScheduled []base.Z + wantDead []base.Z }{ { - scheduled: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + scheduled: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, - dead: []h.ZSetEntry{}, + dead: []base.Z{}, id: m1.ID, score: t1.Unix(), want: nil, - wantScheduled: []h.ZSetEntry{ - {Msg: m2, Score: float64(t2.Unix())}, + wantScheduled: []base.Z{ + {Message: m2, Score: t2.Unix()}, }, - wantDead: []h.ZSetEntry{ - {Msg: m1, Score: float64(time.Now().Unix())}, + wantDead: []base.Z{ + {Message: m1, Score: time.Now().Unix()}, }, }, { - scheduled: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, + scheduled: []base.Z{ + {Message: m1, Score: t1.Unix()}, }, - dead: []h.ZSetEntry{ - {Msg: m2, Score: float64(t2.Unix())}, + dead: []base.Z{ + {Message: m2, Score: t2.Unix()}, }, id: m2.ID, score: t2.Unix(), want: ErrTaskNotFound, - wantScheduled: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, + wantScheduled: []base.Z{ + {Message: m1, Score: t1.Unix()}, }, - wantDead: []h.ZSetEntry{ - {Msg: m2, Score: float64(t2.Unix())}, + wantDead: []base.Z{ + {Message: m2, Score: t2.Unix()}, }, }, } @@ -1546,50 +1478,50 @@ func TestKillAllRetryTasks(t *testing.T) { t2 := time.Now().Add(time.Hour) tests := []struct { - retry []h.ZSetEntry - dead []h.ZSetEntry + retry []base.Z + dead []base.Z want int64 - 
wantRetry []h.ZSetEntry - wantDead []h.ZSetEntry + wantRetry []base.Z + wantDead []base.Z }{ { - retry: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + retry: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, - dead: []h.ZSetEntry{}, + dead: []base.Z{}, want: 2, - wantRetry: []h.ZSetEntry{}, - wantDead: []h.ZSetEntry{ - {Msg: m1, Score: float64(time.Now().Unix())}, - {Msg: m2, Score: float64(time.Now().Unix())}, + wantRetry: []base.Z{}, + wantDead: []base.Z{ + {Message: m1, Score: time.Now().Unix()}, + {Message: m2, Score: time.Now().Unix()}, }, }, { - retry: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, + retry: []base.Z{ + {Message: m1, Score: t1.Unix()}, }, - dead: []h.ZSetEntry{ - {Msg: m2, Score: float64(t2.Unix())}, + dead: []base.Z{ + {Message: m2, Score: t2.Unix()}, }, want: 1, - wantRetry: []h.ZSetEntry{}, - wantDead: []h.ZSetEntry{ - {Msg: m1, Score: float64(time.Now().Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + wantRetry: []base.Z{}, + wantDead: []base.Z{ + {Message: m1, Score: time.Now().Unix()}, + {Message: m2, Score: t2.Unix()}, }, }, { - retry: []h.ZSetEntry{}, - dead: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + retry: []base.Z{}, + dead: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, want: 0, - wantRetry: []h.ZSetEntry{}, - wantDead: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + wantRetry: []base.Z{}, + wantDead: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, }, } @@ -1628,50 +1560,50 @@ func TestKillAllScheduledTasks(t *testing.T) { t2 := time.Now().Add(time.Hour) tests := []struct { - scheduled []h.ZSetEntry - dead []h.ZSetEntry + scheduled []base.Z + dead []base.Z want int64 - wantScheduled []h.ZSetEntry - wantDead []h.ZSetEntry + wantScheduled []base.Z + wantDead []base.Z }{ 
{ - scheduled: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + scheduled: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, - dead: []h.ZSetEntry{}, + dead: []base.Z{}, want: 2, - wantScheduled: []h.ZSetEntry{}, - wantDead: []h.ZSetEntry{ - {Msg: m1, Score: float64(time.Now().Unix())}, - {Msg: m2, Score: float64(time.Now().Unix())}, + wantScheduled: []base.Z{}, + wantDead: []base.Z{ + {Message: m1, Score: time.Now().Unix()}, + {Message: m2, Score: time.Now().Unix()}, }, }, { - scheduled: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, + scheduled: []base.Z{ + {Message: m1, Score: t1.Unix()}, }, - dead: []h.ZSetEntry{ - {Msg: m2, Score: float64(t2.Unix())}, + dead: []base.Z{ + {Message: m2, Score: t2.Unix()}, }, want: 1, - wantScheduled: []h.ZSetEntry{}, - wantDead: []h.ZSetEntry{ - {Msg: m1, Score: float64(time.Now().Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + wantScheduled: []base.Z{}, + wantDead: []base.Z{ + {Message: m1, Score: time.Now().Unix()}, + {Message: m2, Score: t2.Unix()}, }, }, { - scheduled: []h.ZSetEntry{}, - dead: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + scheduled: []base.Z{}, + dead: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, want: 0, - wantScheduled: []h.ZSetEntry{}, - wantDead: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + wantScheduled: []base.Z{}, + wantDead: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, }, } @@ -1710,16 +1642,16 @@ func TestDeleteDeadTask(t *testing.T) { t2 := time.Now().Add(-time.Hour) tests := []struct { - dead []h.ZSetEntry + dead []base.Z id uuid.UUID score int64 want error wantDead []*base.TaskMessage }{ { - dead: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + dead: []base.Z{ + {Message: m1, 
Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, id: m1.ID, score: t1.Unix(), @@ -1727,9 +1659,9 @@ func TestDeleteDeadTask(t *testing.T) { wantDead: []*base.TaskMessage{m2}, }, { - dead: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + dead: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, id: m1.ID, score: t2.Unix(), // id and score mismatch @@ -1737,7 +1669,7 @@ func TestDeleteDeadTask(t *testing.T) { wantDead: []*base.TaskMessage{m1, m2}, }, { - dead: []h.ZSetEntry{}, + dead: []base.Z{}, id: m1.ID, score: t1.Unix(), want: ErrTaskNotFound, @@ -1770,16 +1702,16 @@ func TestDeleteRetryTask(t *testing.T) { t2 := time.Now().Add(time.Hour) tests := []struct { - retry []h.ZSetEntry + retry []base.Z id uuid.UUID score int64 want error wantRetry []*base.TaskMessage }{ { - retry: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + retry: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, id: m1.ID, score: t1.Unix(), @@ -1787,8 +1719,8 @@ func TestDeleteRetryTask(t *testing.T) { wantRetry: []*base.TaskMessage{m2}, }, { - retry: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, + retry: []base.Z{ + {Message: m1, Score: t1.Unix()}, }, id: m2.ID, score: t2.Unix(), @@ -1822,16 +1754,16 @@ func TestDeleteScheduledTask(t *testing.T) { t2 := time.Now().Add(time.Hour) tests := []struct { - scheduled []h.ZSetEntry + scheduled []base.Z id uuid.UUID score int64 want error wantScheduled []*base.TaskMessage }{ { - scheduled: []h.ZSetEntry{ - {Msg: m1, Score: float64(t1.Unix())}, - {Msg: m2, Score: float64(t2.Unix())}, + scheduled: []base.Z{ + {Message: m1, Score: t1.Unix()}, + {Message: m2, Score: t2.Unix()}, }, id: m1.ID, score: t1.Unix(), @@ -1839,8 +1771,8 @@ func TestDeleteScheduledTask(t *testing.T) { wantScheduled: []*base.TaskMessage{m2}, }, { - scheduled: []h.ZSetEntry{ - {Msg: m1, Score: 
float64(t1.Unix())}, + scheduled: []base.Z{ + {Message: m1, Score: t1.Unix()}, }, id: m2.ID, score: t2.Unix(), @@ -1873,15 +1805,22 @@ func TestDeleteAllDeadTasks(t *testing.T) { m3 := h.NewTaskMessage("gen_thumbnail", nil) tests := []struct { - dead []h.ZSetEntry + dead []base.Z + want int64 wantDead []*base.TaskMessage }{ { - dead: []h.ZSetEntry{ - {Msg: m1, Score: float64(time.Now().Unix())}, - {Msg: m2, Score: float64(time.Now().Unix())}, - {Msg: m3, Score: float64(time.Now().Unix())}, + dead: []base.Z{ + {Message: m1, Score: time.Now().Unix()}, + {Message: m2, Score: time.Now().Unix()}, + {Message: m3, Score: time.Now().Unix()}, }, + want: 3, + wantDead: []*base.TaskMessage{}, + }, + { + dead: []base.Z{}, + want: 0, wantDead: []*base.TaskMessage{}, }, } @@ -1890,11 +1829,13 @@ func TestDeleteAllDeadTasks(t *testing.T) { h.FlushDB(t, r.client) // clean up db before each test case h.SeedDeadQueue(t, r.client, tc.dead) - err := r.DeleteAllDeadTasks() + got, err := r.DeleteAllDeadTasks() if err != nil { - t.Errorf("r.DeleteAllDeaadTasks = %v, want nil", err) + t.Errorf("r.DeleteAllDeadTasks returned error: %v", err) + } + if got != tc.want { + t.Errorf("r.DeleteAllDeadTasks() = %d, nil, want %d, nil", got, tc.want) } - gotDead := h.GetDeadMessages(t, r.client) if diff := cmp.Diff(tc.wantDead, gotDead, h.SortMsgOpt); diff != "" { t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.DeadQueue, diff) @@ -1909,15 +1850,22 @@ func TestDeleteAllRetryTasks(t *testing.T) { m3 := h.NewTaskMessage("gen_thumbnail", nil) tests := []struct { - retry []h.ZSetEntry + retry []base.Z + want int64 wantRetry []*base.TaskMessage }{ { - retry: []h.ZSetEntry{ - {Msg: m1, Score: float64(time.Now().Unix())}, - {Msg: m2, Score: float64(time.Now().Unix())}, - {Msg: m3, Score: float64(time.Now().Unix())}, + retry: []base.Z{ + {Message: m1, Score: time.Now().Unix()}, + {Message: m2, Score: time.Now().Unix()}, + {Message: m3, Score: time.Now().Unix()}, }, + want: 3, + wantRetry: 
[]*base.TaskMessage{}, + }, + { + retry: []base.Z{}, + want: 0, wantRetry: []*base.TaskMessage{}, }, } @@ -1926,11 +1874,13 @@ func TestDeleteAllRetryTasks(t *testing.T) { h.FlushDB(t, r.client) // clean up db before each test case h.SeedRetryQueue(t, r.client, tc.retry) - err := r.DeleteAllRetryTasks() + got, err := r.DeleteAllRetryTasks() if err != nil { - t.Errorf("r.DeleteAllDeaadTasks = %v, want nil", err) + t.Errorf("r.DeleteAllRetryTasks returned error: %v", err) + } + if got != tc.want { + t.Errorf("r.DeleteAllRetryTasks() = %d, nil, want %d, nil", got, tc.want) } - gotRetry := h.GetRetryMessages(t, r.client) if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortMsgOpt); diff != "" { t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.RetryQueue, diff) @@ -1945,15 +1895,22 @@ func TestDeleteAllScheduledTasks(t *testing.T) { m3 := h.NewTaskMessage("gen_thumbnail", nil) tests := []struct { - scheduled []h.ZSetEntry + scheduled []base.Z + want int64 wantScheduled []*base.TaskMessage }{ { - scheduled: []h.ZSetEntry{ - {Msg: m1, Score: float64(time.Now().Add(time.Minute).Unix())}, - {Msg: m2, Score: float64(time.Now().Add(time.Minute).Unix())}, - {Msg: m3, Score: float64(time.Now().Add(time.Minute).Unix())}, + scheduled: []base.Z{ + {Message: m1, Score: time.Now().Add(time.Minute).Unix()}, + {Message: m2, Score: time.Now().Add(time.Minute).Unix()}, + {Message: m3, Score: time.Now().Add(time.Minute).Unix()}, }, + want: 3, + wantScheduled: []*base.TaskMessage{}, + }, + { + scheduled: []base.Z{}, + want: 0, wantScheduled: []*base.TaskMessage{}, }, } @@ -1962,11 +1919,13 @@ func TestDeleteAllScheduledTasks(t *testing.T) { h.FlushDB(t, r.client) // clean up db before each test case h.SeedScheduledQueue(t, r.client, tc.scheduled) - err := r.DeleteAllScheduledTasks() + got, err := r.DeleteAllScheduledTasks() if err != nil { - t.Errorf("r.DeleteAllDeaadTasks = %v, want nil", err) + t.Errorf("r.DeleteAllScheduledTasks returned error: %v", err) + } + if got != tc.want { 
+ t.Errorf("r.DeleteAllScheduledTasks() = %d, nil, want %d, nil", got, tc.want) } - gotScheduled := h.GetScheduledMessages(t, r.client) if diff := cmp.Diff(tc.wantScheduled, gotScheduled, h.SortMsgOpt); diff != "" { t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.ScheduledQueue, diff) diff --git a/internal/rdb/rdb_test.go b/internal/rdb/rdb_test.go index 805748f..db4fa54 100644 --- a/internal/rdb/rdb_test.go +++ b/internal/rdb/rdb_test.go @@ -148,7 +148,7 @@ func TestDequeue(t *testing.T) { err error wantEnqueued map[string][]*base.TaskMessage wantInProgress []*base.TaskMessage - wantDeadlines []h.ZSetEntry + wantDeadlines []base.Z }{ { enqueued: map[string][]*base.TaskMessage{ @@ -162,10 +162,10 @@ func TestDequeue(t *testing.T) { "default": {}, }, wantInProgress: []*base.TaskMessage{t1}, - wantDeadlines: []h.ZSetEntry{ + wantDeadlines: []base.Z{ { - Msg: t1, - Score: float64(t1Deadline), + Message: t1, + Score: t1Deadline, }, }, }, @@ -181,7 +181,7 @@ func TestDequeue(t *testing.T) { "default": {}, }, wantInProgress: []*base.TaskMessage{}, - wantDeadlines: []h.ZSetEntry{}, + wantDeadlines: []base.Z{}, }, { enqueued: map[string][]*base.TaskMessage{ @@ -199,10 +199,10 @@ func TestDequeue(t *testing.T) { "low": {t3}, }, wantInProgress: []*base.TaskMessage{t2}, - wantDeadlines: []h.ZSetEntry{ + wantDeadlines: []base.Z{ { - Msg: t2, - Score: float64(t2Deadline), + Message: t2, + Score: t2Deadline, }, }, }, @@ -222,10 +222,10 @@ func TestDequeue(t *testing.T) { "low": {t2, t1}, }, wantInProgress: []*base.TaskMessage{t3}, - wantDeadlines: []h.ZSetEntry{ + wantDeadlines: []base.Z{ { - Msg: t3, - Score: float64(t3Deadline), + Message: t3, + Score: t3Deadline, }, }, }, @@ -245,7 +245,7 @@ func TestDequeue(t *testing.T) { "low": {}, }, wantInProgress: []*base.TaskMessage{}, - wantDeadlines: []h.ZSetEntry{}, + wantDeadlines: []base.Z{}, }, } @@ -412,70 +412,70 @@ func TestDone(t *testing.T) { tests := []struct { inProgress []*base.TaskMessage // initial state of the 
in-progress list - deadlines []h.ZSetEntry // initial state of deadlines set + deadlines []base.Z // initial state of deadlines set target *base.TaskMessage // task to remove wantInProgress []*base.TaskMessage // final state of the in-progress list - wantDeadlines []h.ZSetEntry // final state of the deadline set + wantDeadlines []base.Z // final state of the deadline set }{ { inProgress: []*base.TaskMessage{t1, t2}, - deadlines: []h.ZSetEntry{ + deadlines: []base.Z{ { - Msg: t1, - Score: float64(t1Deadline), + Message: t1, + Score: t1Deadline, }, { - Msg: t2, - Score: float64(t2Deadline), + Message: t2, + Score: t2Deadline, }, }, target: t1, wantInProgress: []*base.TaskMessage{t2}, - wantDeadlines: []h.ZSetEntry{ + wantDeadlines: []base.Z{ { - Msg: t2, - Score: float64(t2Deadline), + Message: t2, + Score: t2Deadline, }, }, }, { inProgress: []*base.TaskMessage{t1}, - deadlines: []h.ZSetEntry{ + deadlines: []base.Z{ { - Msg: t1, - Score: float64(t1Deadline), + Message: t1, + Score: t1Deadline, }, }, target: t1, wantInProgress: []*base.TaskMessage{}, - wantDeadlines: []h.ZSetEntry{}, + wantDeadlines: []base.Z{}, }, { inProgress: []*base.TaskMessage{t1, t2, t3}, - deadlines: []h.ZSetEntry{ + deadlines: []base.Z{ { - Msg: t1, - Score: float64(t1Deadline), + Message: t1, + Score: t1Deadline, }, { - Msg: t2, - Score: float64(t2Deadline), + Message: t2, + Score: t2Deadline, }, { - Msg: t3, - Score: float64(t3Deadline), + Message: t3, + Score: t3Deadline, }, }, target: t3, wantInProgress: []*base.TaskMessage{t1, t2}, - wantDeadlines: []h.ZSetEntry{ + wantDeadlines: []base.Z{ { - Msg: t1, - Score: float64(t1Deadline), + Message: t1, + Score: t1Deadline, }, { - Msg: t2, - Score: float64(t2Deadline), + Message: t2, + Score: t2Deadline, }, }, }, @@ -560,28 +560,28 @@ func TestRequeue(t *testing.T) { tests := []struct { enqueued map[string][]*base.TaskMessage // initial state of queues inProgress []*base.TaskMessage // initial state of the in-progress list - deadlines 
[]h.ZSetEntry // initial state of the deadlines set + deadlines []base.Z // initial state of the deadlines set target *base.TaskMessage // task to requeue wantEnqueued map[string][]*base.TaskMessage // final state of queues wantInProgress []*base.TaskMessage // final state of the in-progress list - wantDeadlines []h.ZSetEntry // final state of the deadlines set + wantDeadlines []base.Z // final state of the deadlines set }{ { enqueued: map[string][]*base.TaskMessage{ base.DefaultQueueName: {}, }, inProgress: []*base.TaskMessage{t1, t2}, - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(t1Deadline)}, - {Msg: t2, Score: float64(t2Deadline)}, + deadlines: []base.Z{ + {Message: t1, Score: t1Deadline}, + {Message: t2, Score: t2Deadline}, }, target: t1, wantEnqueued: map[string][]*base.TaskMessage{ base.DefaultQueueName: {t1}, }, wantInProgress: []*base.TaskMessage{t2}, - wantDeadlines: []h.ZSetEntry{ - {Msg: t2, Score: float64(t2Deadline)}, + wantDeadlines: []base.Z{ + {Message: t2, Score: t2Deadline}, }, }, { @@ -589,15 +589,15 @@ func TestRequeue(t *testing.T) { base.DefaultQueueName: {t1}, }, inProgress: []*base.TaskMessage{t2}, - deadlines: []h.ZSetEntry{ - {Msg: t2, Score: float64(t2Deadline)}, + deadlines: []base.Z{ + {Message: t2, Score: t2Deadline}, }, target: t2, wantEnqueued: map[string][]*base.TaskMessage{ base.DefaultQueueName: {t1, t2}, }, wantInProgress: []*base.TaskMessage{}, - wantDeadlines: []h.ZSetEntry{}, + wantDeadlines: []base.Z{}, }, { enqueued: map[string][]*base.TaskMessage{ @@ -605,9 +605,9 @@ func TestRequeue(t *testing.T) { "critical": {}, }, inProgress: []*base.TaskMessage{t2, t3}, - deadlines: []h.ZSetEntry{ - {Msg: t2, Score: float64(t2Deadline)}, - {Msg: t3, Score: float64(t3Deadline)}, + deadlines: []base.Z{ + {Message: t2, Score: t2Deadline}, + {Message: t3, Score: t3Deadline}, }, target: t3, wantEnqueued: map[string][]*base.TaskMessage{ @@ -615,8 +615,8 @@ func TestRequeue(t *testing.T) { "critical": {t3}, }, wantInProgress: 
[]*base.TaskMessage{t2}, - wantDeadlines: []h.ZSetEntry{ - {Msg: t2, Score: float64(t2Deadline)}, + wantDeadlines: []base.Z{ + {Message: t2, Score: t2Deadline}, }, }, } @@ -765,42 +765,42 @@ func TestRetry(t *testing.T) { tests := []struct { inProgress []*base.TaskMessage - deadlines []h.ZSetEntry - retry []h.ZSetEntry + deadlines []base.Z + retry []base.Z msg *base.TaskMessage processAt time.Time errMsg string wantInProgress []*base.TaskMessage - wantDeadlines []h.ZSetEntry - wantRetry []h.ZSetEntry + wantDeadlines []base.Z + wantRetry []base.Z }{ { inProgress: []*base.TaskMessage{t1, t2}, - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(t1Deadline)}, - {Msg: t2, Score: float64(t2Deadline)}, + deadlines: []base.Z{ + {Message: t1, Score: t1Deadline}, + {Message: t2, Score: t2Deadline}, }, - retry: []h.ZSetEntry{ + retry: []base.Z{ { - Msg: t3, - Score: float64(now.Add(time.Minute).Unix()), + Message: t3, + Score: now.Add(time.Minute).Unix(), }, }, msg: t1, processAt: now.Add(5 * time.Minute), errMsg: errMsg, wantInProgress: []*base.TaskMessage{t2}, - wantDeadlines: []h.ZSetEntry{ - {Msg: t2, Score: float64(t2Deadline)}, + wantDeadlines: []base.Z{ + {Message: t2, Score: t2Deadline}, }, - wantRetry: []h.ZSetEntry{ + wantRetry: []base.Z{ { - Msg: h.TaskMessageAfterRetry(*t1, errMsg), - Score: float64(now.Add(5 * time.Minute).Unix()), + Message: h.TaskMessageAfterRetry(*t1, errMsg), + Score: now.Add(5 * time.Minute).Unix(), }, { - Msg: t3, - Score: float64(now.Add(time.Minute).Unix()), + Message: t3, + Score: now.Add(time.Minute).Unix(), }, }, }, @@ -891,59 +891,59 @@ func TestKill(t *testing.T) { // TODO(hibiken): add test cases for trimming tests := []struct { inProgress []*base.TaskMessage - deadlines []h.ZSetEntry - dead []h.ZSetEntry + deadlines []base.Z + dead []base.Z target *base.TaskMessage // task to kill wantInProgress []*base.TaskMessage - wantDeadlines []h.ZSetEntry - wantDead []h.ZSetEntry + wantDeadlines []base.Z + wantDead []base.Z }{ { 
inProgress: []*base.TaskMessage{t1, t2}, - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(t1Deadline)}, - {Msg: t2, Score: float64(t2Deadline)}, + deadlines: []base.Z{ + {Message: t1, Score: t1Deadline}, + {Message: t2, Score: t2Deadline}, }, - dead: []h.ZSetEntry{ + dead: []base.Z{ { - Msg: t3, - Score: float64(now.Add(-time.Hour).Unix()), + Message: t3, + Score: now.Add(-time.Hour).Unix(), }, }, target: t1, wantInProgress: []*base.TaskMessage{t2}, - wantDeadlines: []h.ZSetEntry{ - {Msg: t2, Score: float64(t2Deadline)}, + wantDeadlines: []base.Z{ + {Message: t2, Score: t2Deadline}, }, - wantDead: []h.ZSetEntry{ + wantDead: []base.Z{ { - Msg: h.TaskMessageWithError(*t1, errMsg), - Score: float64(now.Unix()), + Message: h.TaskMessageWithError(*t1, errMsg), + Score: now.Unix(), }, { - Msg: t3, - Score: float64(now.Add(-time.Hour).Unix()), + Message: t3, + Score: now.Add(-time.Hour).Unix(), }, }, }, { inProgress: []*base.TaskMessage{t1, t2, t3}, - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(t1Deadline)}, - {Msg: t2, Score: float64(t2Deadline)}, - {Msg: t3, Score: float64(t3Deadline)}, + deadlines: []base.Z{ + {Message: t1, Score: t1Deadline}, + {Message: t2, Score: t2Deadline}, + {Message: t3, Score: t3Deadline}, }, - dead: []h.ZSetEntry{}, + dead: []base.Z{}, target: t1, wantInProgress: []*base.TaskMessage{t2, t3}, - wantDeadlines: []h.ZSetEntry{ - {Msg: t2, Score: float64(t2Deadline)}, - {Msg: t3, Score: float64(t3Deadline)}, + wantDeadlines: []base.Z{ + {Message: t2, Score: t2Deadline}, + {Message: t3, Score: t3Deadline}, }, - wantDead: []h.ZSetEntry{ + wantDead: []base.Z{ { - Msg: h.TaskMessageWithError(*t1, errMsg), - Score: float64(now.Unix()), + Message: h.TaskMessageWithError(*t1, errMsg), + Score: now.Unix(), }, }, }, @@ -1009,19 +1009,19 @@ func TestCheckAndEnqueue(t *testing.T) { hourFromNow := time.Now().Add(time.Hour) tests := []struct { - scheduled []h.ZSetEntry - retry []h.ZSetEntry + scheduled []base.Z + retry []base.Z wantEnqueued 
map[string][]*base.TaskMessage wantScheduled []*base.TaskMessage wantRetry []*base.TaskMessage }{ { - scheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(secondAgo.Unix())}, - {Msg: t2, Score: float64(secondAgo.Unix())}, + scheduled: []base.Z{ + {Message: t1, Score: secondAgo.Unix()}, + {Message: t2, Score: secondAgo.Unix()}, }, - retry: []h.ZSetEntry{ - {Msg: t3, Score: float64(secondAgo.Unix())}}, + retry: []base.Z{ + {Message: t3, Score: secondAgo.Unix()}}, wantEnqueued: map[string][]*base.TaskMessage{ "default": {t1, t2, t3}, }, @@ -1029,11 +1029,11 @@ func TestCheckAndEnqueue(t *testing.T) { wantRetry: []*base.TaskMessage{}, }, { - scheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(hourFromNow.Unix())}, - {Msg: t2, Score: float64(secondAgo.Unix())}}, - retry: []h.ZSetEntry{ - {Msg: t3, Score: float64(secondAgo.Unix())}}, + scheduled: []base.Z{ + {Message: t1, Score: hourFromNow.Unix()}, + {Message: t2, Score: secondAgo.Unix()}}, + retry: []base.Z{ + {Message: t3, Score: secondAgo.Unix()}}, wantEnqueued: map[string][]*base.TaskMessage{ "default": {t2, t3}, }, @@ -1041,11 +1041,11 @@ func TestCheckAndEnqueue(t *testing.T) { wantRetry: []*base.TaskMessage{}, }, { - scheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(hourFromNow.Unix())}, - {Msg: t2, Score: float64(hourFromNow.Unix())}}, - retry: []h.ZSetEntry{ - {Msg: t3, Score: float64(hourFromNow.Unix())}}, + scheduled: []base.Z{ + {Message: t1, Score: hourFromNow.Unix()}, + {Message: t2, Score: hourFromNow.Unix()}}, + retry: []base.Z{ + {Message: t3, Score: hourFromNow.Unix()}}, wantEnqueued: map[string][]*base.TaskMessage{ "default": {}, }, @@ -1053,12 +1053,12 @@ func TestCheckAndEnqueue(t *testing.T) { wantRetry: []*base.TaskMessage{t3}, }, { - scheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(secondAgo.Unix())}, - {Msg: t4, Score: float64(secondAgo.Unix())}, + scheduled: []base.Z{ + {Message: t1, Score: secondAgo.Unix()}, + {Message: t4, Score: secondAgo.Unix()}, }, - retry: []h.ZSetEntry{ - {Msg: 
t5, Score: float64(secondAgo.Unix())}}, + retry: []base.Z{ + {Message: t5, Score: secondAgo.Unix()}}, wantEnqueued: map[string][]*base.TaskMessage{ "default": {t1}, "critical": {t4}, @@ -1112,41 +1112,41 @@ func TestListDeadlineExceeded(t *testing.T) { tests := []struct { desc string - deadlines []h.ZSetEntry + deadlines []base.Z t time.Time want []*base.TaskMessage }{ { desc: "with one task in-progress", - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(fiveMinutesAgo.Unix())}, + deadlines: []base.Z{ + {Message: t1, Score: fiveMinutesAgo.Unix()}, }, t: time.Now(), want: []*base.TaskMessage{t1}, }, { desc: "with multiple tasks in-progress, and one expired", - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(oneHourAgo.Unix())}, - {Msg: t2, Score: float64(fiveMinutesFromNow.Unix())}, - {Msg: t3, Score: float64(oneHourFromNow.Unix())}, + deadlines: []base.Z{ + {Message: t1, Score: oneHourAgo.Unix()}, + {Message: t2, Score: fiveMinutesFromNow.Unix()}, + {Message: t3, Score: oneHourFromNow.Unix()}, }, t: time.Now(), want: []*base.TaskMessage{t1}, }, { desc: "with multiple expired tasks in-progress", - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(oneHourAgo.Unix())}, - {Msg: t2, Score: float64(fiveMinutesAgo.Unix())}, - {Msg: t3, Score: float64(oneHourFromNow.Unix())}, + deadlines: []base.Z{ + {Message: t1, Score: oneHourAgo.Unix()}, + {Message: t2, Score: fiveMinutesAgo.Unix()}, + {Message: t3, Score: oneHourFromNow.Unix()}, }, t: time.Now(), want: []*base.TaskMessage{t1, t2}, }, { desc: "with empty in-progress queue", - deadlines: []h.ZSetEntry{}, + deadlines: []base.Z{}, t: time.Now(), want: []*base.TaskMessage{}, }, diff --git a/processor_test.go b/processor_test.go index 2d7aa2a..e045d9b 100644 --- a/processor_test.go +++ b/processor_test.go @@ -223,7 +223,7 @@ func TestProcessorRetry(t *testing.T) { delay time.Duration // retry delay duration handler Handler // task handler wait time.Duration // wait duration between starting and stopping 
processor for this test case - wantRetry []h.ZSetEntry // tasks in retry queue at the end + wantRetry []base.Z // tasks in retry queue at the end wantDead []*base.TaskMessage // tasks in dead queue at the end wantErrCount int // number of times error handler should be called }{ @@ -235,10 +235,10 @@ func TestProcessorRetry(t *testing.T) { return fmt.Errorf(errMsg) }), wait: 2 * time.Second, - wantRetry: []h.ZSetEntry{ - {Msg: h.TaskMessageAfterRetry(*m2, errMsg), Score: float64(now.Add(time.Minute).Unix())}, - {Msg: h.TaskMessageAfterRetry(*m3, errMsg), Score: float64(now.Add(time.Minute).Unix())}, - {Msg: h.TaskMessageAfterRetry(*m4, errMsg), Score: float64(now.Add(time.Minute).Unix())}, + wantRetry: []base.Z{ + {Message: h.TaskMessageAfterRetry(*m2, errMsg), Score: now.Add(time.Minute).Unix()}, + {Message: h.TaskMessageAfterRetry(*m3, errMsg), Score: now.Add(time.Minute).Unix()}, + {Message: h.TaskMessageAfterRetry(*m4, errMsg), Score: now.Add(time.Minute).Unix()}, }, wantDead: []*base.TaskMessage{h.TaskMessageWithError(*m1, errMsg)}, wantErrCount: 4, diff --git a/recoverer_test.go b/recoverer_test.go index 5d33f9a..2f9e82f 100644 --- a/recoverer_test.go +++ b/recoverer_test.go @@ -34,24 +34,24 @@ func TestRecoverer(t *testing.T) { tests := []struct { desc string inProgress []*base.TaskMessage - deadlines []h.ZSetEntry - retry []h.ZSetEntry - dead []h.ZSetEntry + deadlines []base.Z + retry []base.Z + dead []base.Z wantInProgress []*base.TaskMessage - wantDeadlines []h.ZSetEntry + wantDeadlines []base.Z wantRetry []*base.TaskMessage wantDead []*base.TaskMessage }{ { desc: "with one task in-progress", inProgress: []*base.TaskMessage{t1}, - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(fiveMinutesAgo.Unix())}, + deadlines: []base.Z{ + {Message: t1, Score: fiveMinutesAgo.Unix()}, }, - retry: []h.ZSetEntry{}, - dead: []h.ZSetEntry{}, + retry: []base.Z{}, + dead: []base.Z{}, wantInProgress: []*base.TaskMessage{}, - wantDeadlines: []h.ZSetEntry{}, + 
wantDeadlines: []base.Z{}, wantRetry: []*base.TaskMessage{ h.TaskMessageAfterRetry(*t1, "deadline exceeded"), }, @@ -60,30 +60,30 @@ func TestRecoverer(t *testing.T) { { desc: "with a task with max-retry reached", inProgress: []*base.TaskMessage{t4}, - deadlines: []h.ZSetEntry{ - {Msg: t4, Score: float64(fiveMinutesAgo.Unix())}, + deadlines: []base.Z{ + {Message: t4, Score: fiveMinutesAgo.Unix()}, }, - retry: []h.ZSetEntry{}, - dead: []h.ZSetEntry{}, + retry: []base.Z{}, + dead: []base.Z{}, wantInProgress: []*base.TaskMessage{}, - wantDeadlines: []h.ZSetEntry{}, + wantDeadlines: []base.Z{}, wantRetry: []*base.TaskMessage{}, wantDead: []*base.TaskMessage{h.TaskMessageWithError(*t4, "deadline exceeded")}, }, { desc: "with multiple tasks in-progress, and one expired", inProgress: []*base.TaskMessage{t1, t2, t3}, - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(oneHourAgo.Unix())}, - {Msg: t2, Score: float64(fiveMinutesFromNow.Unix())}, - {Msg: t3, Score: float64(oneHourFromNow.Unix())}, + deadlines: []base.Z{ + {Message: t1, Score: oneHourAgo.Unix()}, + {Message: t2, Score: fiveMinutesFromNow.Unix()}, + {Message: t3, Score: oneHourFromNow.Unix()}, }, - retry: []h.ZSetEntry{}, - dead: []h.ZSetEntry{}, + retry: []base.Z{}, + dead: []base.Z{}, wantInProgress: []*base.TaskMessage{t2, t3}, - wantDeadlines: []h.ZSetEntry{ - {Msg: t2, Score: float64(fiveMinutesFromNow.Unix())}, - {Msg: t3, Score: float64(oneHourFromNow.Unix())}, + wantDeadlines: []base.Z{ + {Message: t2, Score: fiveMinutesFromNow.Unix()}, + {Message: t3, Score: oneHourFromNow.Unix()}, }, wantRetry: []*base.TaskMessage{ h.TaskMessageAfterRetry(*t1, "deadline exceeded"), @@ -93,16 +93,16 @@ func TestRecoverer(t *testing.T) { { desc: "with multiple expired tasks in-progress", inProgress: []*base.TaskMessage{t1, t2, t3}, - deadlines: []h.ZSetEntry{ - {Msg: t1, Score: float64(oneHourAgo.Unix())}, - {Msg: t2, Score: float64(fiveMinutesAgo.Unix())}, - {Msg: t3, Score: float64(oneHourFromNow.Unix())}, + 
deadlines: []base.Z{ + {Message: t1, Score: oneHourAgo.Unix()}, + {Message: t2, Score: fiveMinutesAgo.Unix()}, + {Message: t3, Score: oneHourFromNow.Unix()}, }, - retry: []h.ZSetEntry{}, - dead: []h.ZSetEntry{}, + retry: []base.Z{}, + dead: []base.Z{}, wantInProgress: []*base.TaskMessage{t3}, - wantDeadlines: []h.ZSetEntry{ - {Msg: t3, Score: float64(oneHourFromNow.Unix())}, + wantDeadlines: []base.Z{ + {Message: t3, Score: oneHourFromNow.Unix()}, }, wantRetry: []*base.TaskMessage{ h.TaskMessageAfterRetry(*t1, "deadline exceeded"), @@ -113,11 +113,11 @@ func TestRecoverer(t *testing.T) { { desc: "with empty in-progress queue", inProgress: []*base.TaskMessage{}, - deadlines: []h.ZSetEntry{}, - retry: []h.ZSetEntry{}, - dead: []h.ZSetEntry{}, + deadlines: []base.Z{}, + retry: []base.Z{}, + dead: []base.Z{}, wantInProgress: []*base.TaskMessage{}, - wantDeadlines: []h.ZSetEntry{}, + wantDeadlines: []base.Z{}, wantRetry: []*base.TaskMessage{}, wantDead: []*base.TaskMessage{}, }, diff --git a/scheduler_test.go b/scheduler_test.go index 34cd9fb..0d522b5 100644 --- a/scheduler_test.go +++ b/scheduler_test.go @@ -31,8 +31,8 @@ func TestScheduler(t *testing.T) { now := time.Now() tests := []struct { - initScheduled []h.ZSetEntry // scheduled queue initial state - initRetry []h.ZSetEntry // retry queue initial state + initScheduled []base.Z // scheduled queue initial state + initRetry []base.Z // retry queue initial state initQueue []*base.TaskMessage // default queue initial state wait time.Duration // wait duration before checking for final state wantScheduled []*base.TaskMessage // schedule queue final state @@ -40,12 +40,12 @@ func TestScheduler(t *testing.T) { wantQueue []*base.TaskMessage // default queue final state }{ { - initScheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(now.Add(time.Hour).Unix())}, - {Msg: t2, Score: float64(now.Add(-2 * time.Second).Unix())}, + initScheduled: []base.Z{ + {Message: t1, Score: now.Add(time.Hour).Unix()}, + {Message: t2, Score: 
now.Add(-2 * time.Second).Unix()}, }, - initRetry: []h.ZSetEntry{ - {Msg: t3, Score: float64(time.Now().Add(-500 * time.Millisecond).Unix())}, + initRetry: []base.Z{ + {Message: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()}, }, initQueue: []*base.TaskMessage{t4}, wait: pollInterval * 2, @@ -54,12 +54,12 @@ func TestScheduler(t *testing.T) { wantQueue: []*base.TaskMessage{t2, t3, t4}, }, { - initScheduled: []h.ZSetEntry{ - {Msg: t1, Score: float64(now.Unix())}, - {Msg: t2, Score: float64(now.Add(-2 * time.Second).Unix())}, - {Msg: t3, Score: float64(now.Add(-500 * time.Millisecond).Unix())}, + initScheduled: []base.Z{ + {Message: t1, Score: now.Unix()}, + {Message: t2, Score: now.Add(-2 * time.Second).Unix()}, + {Message: t3, Score: now.Add(-500 * time.Millisecond).Unix()}, }, - initRetry: []h.ZSetEntry{}, + initRetry: []base.Z{}, initQueue: []*base.TaskMessage{t4}, wait: pollInterval * 2, wantScheduled: []*base.TaskMessage{}, diff --git a/tools/asynq/cmd/del.go b/tools/asynq/cmd/del.go index e5db6a1..adc336a 100644 --- a/tools/asynq/cmd/del.go +++ b/tools/asynq/cmd/del.go @@ -8,15 +8,14 @@ import ( "fmt" "os" - "github.com/go-redis/redis/v7" - "github.com/hibiken/asynq/internal/rdb" + "github.com/hibiken/asynq" "github.com/spf13/cobra" "github.com/spf13/viper" ) // delCmd represents the del command var delCmd = &cobra.Command{ - Use: "del [task id]", + Use: "del [task key]", Short: "Deletes a task given an identifier", Long: `Del (asynq del) will delete a task given an identifier. 
@@ -44,27 +43,12 @@ func init() { } func del(cmd *cobra.Command, args []string) { - id, score, qtype, err := parseQueryID(args[0]) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - r := rdb.NewRDB(redis.NewClient(&redis.Options{ + i := asynq.NewInspector(asynq.RedisClientOpt{ Addr: viper.GetString("uri"), DB: viper.GetInt("db"), Password: viper.GetString("password"), - })) - switch qtype { - case "s": - err = r.DeleteScheduledTask(id, score) - case "r": - err = r.DeleteRetryTask(id, score) - case "d": - err = r.DeleteDeadTask(id, score) - default: - fmt.Println("invalid argument") - os.Exit(1) - } + }) + err := i.DeleteTaskByKey(args[0]) if err != nil { fmt.Println(err) os.Exit(1) diff --git a/tools/asynq/cmd/delall.go b/tools/asynq/cmd/delall.go index 004d403..33ae9b6 100644 --- a/tools/asynq/cmd/delall.go +++ b/tools/asynq/cmd/delall.go @@ -8,8 +8,7 @@ import ( "fmt" "os" - "github.com/go-redis/redis/v7" - "github.com/hibiken/asynq/internal/rdb" + "github.com/hibiken/asynq" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -45,20 +44,22 @@ func init() { } func delall(cmd *cobra.Command, args []string) { - c := redis.NewClient(&redis.Options{ + i := asynq.NewInspector(asynq.RedisClientOpt{ Addr: viper.GetString("uri"), DB: viper.GetInt("db"), Password: viper.GetString("password"), }) - r := rdb.NewRDB(c) - var err error + var ( + n int + err error + ) switch args[0] { case "scheduled": - err = r.DeleteAllScheduledTasks() + n, err = i.DeleteAllScheduledTasks() case "retry": - err = r.DeleteAllRetryTasks() + n, err = i.DeleteAllRetryTasks() case "dead": - err = r.DeleteAllDeadTasks() + n, err = i.DeleteAllDeadTasks() default: fmt.Printf("error: `asynq delall [state]` only accepts %v as the argument.\n", delallValidArgs) os.Exit(1) @@ -67,5 +68,5 @@ func delall(cmd *cobra.Command, args []string) { fmt.Println(err) os.Exit(1) } - fmt.Printf("Deleted all tasks in %q state\n", args[0]) + fmt.Printf("Deleted all %d tasks in %q state\n", n, args[0]) } diff 
--git a/tools/asynq/cmd/enq.go b/tools/asynq/cmd/enq.go index 9838461..de64a9d 100644 --- a/tools/asynq/cmd/enq.go +++ b/tools/asynq/cmd/enq.go @@ -8,15 +8,14 @@ import ( "fmt" "os" - "github.com/go-redis/redis/v7" - "github.com/hibiken/asynq/internal/rdb" + "github.com/hibiken/asynq" "github.com/spf13/cobra" "github.com/spf13/viper" ) // enqCmd represents the enq command var enqCmd = &cobra.Command{ - Use: "enq [task id]", + Use: "enq [task key]", Short: "Enqueues a task given an identifier", Long: `Enq (asynq enq) will enqueue a task given an identifier. @@ -47,27 +46,12 @@ func init() { } func enq(cmd *cobra.Command, args []string) { - id, score, qtype, err := parseQueryID(args[0]) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - r := rdb.NewRDB(redis.NewClient(&redis.Options{ + i := asynq.NewInspector(asynq.RedisClientOpt{ Addr: viper.GetString("uri"), DB: viper.GetInt("db"), Password: viper.GetString("password"), - })) - switch qtype { - case "s": - err = r.EnqueueScheduledTask(id, score) - case "r": - err = r.EnqueueRetryTask(id, score) - case "d": - err = r.EnqueueDeadTask(id, score) - default: - fmt.Println("invalid argument") - os.Exit(1) - } + }) + err := i.EnqueueTaskByKey(args[0]) if err != nil { fmt.Println(err) os.Exit(1) diff --git a/tools/asynq/cmd/enqall.go b/tools/asynq/cmd/enqall.go index 3e4913a..dd1f4d1 100644 --- a/tools/asynq/cmd/enqall.go +++ b/tools/asynq/cmd/enqall.go @@ -8,8 +8,7 @@ import ( "fmt" "os" - "github.com/go-redis/redis/v7" - "github.com/hibiken/asynq/internal/rdb" + "github.com/hibiken/asynq" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -48,21 +47,22 @@ func init() { } func enqall(cmd *cobra.Command, args []string) { - c := redis.NewClient(&redis.Options{ + i := asynq.NewInspector(asynq.RedisClientOpt{ Addr: viper.GetString("uri"), DB: viper.GetInt("db"), Password: viper.GetString("password"), }) - r := rdb.NewRDB(c) - var n int64 - var err error + var ( + n int + err error + ) switch args[0] { case 
"scheduled": - n, err = r.EnqueueAllScheduledTasks() + n, err = i.EnqueueAllScheduledTasks() case "retry": - n, err = r.EnqueueAllRetryTasks() + n, err = i.EnqueueAllRetryTasks() case "dead": - n, err = r.EnqueueAllDeadTasks() + n, err = i.EnqueueAllDeadTasks() default: fmt.Printf("error: `asynq enqall [state]` only accepts %v as the argument.\n", enqallValidArgs) os.Exit(1) diff --git a/tools/asynq/cmd/history.go b/tools/asynq/cmd/history.go index 764504e..406bbd2 100644 --- a/tools/asynq/cmd/history.go +++ b/tools/asynq/cmd/history.go @@ -10,8 +10,7 @@ import ( "strings" "text/tabwriter" - "github.com/go-redis/redis/v7" - "github.com/hibiken/asynq/internal/rdb" + "github.com/hibiken/asynq" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -38,14 +37,13 @@ func init() { } func history(cmd *cobra.Command, args []string) { - c := redis.NewClient(&redis.Options{ + i := asynq.NewInspector(asynq.RedisClientOpt{ Addr: viper.GetString("uri"), DB: viper.GetInt("db"), Password: viper.GetString("password"), }) - r := rdb.NewRDB(c) - stats, err := r.HistoricalStats(days) + stats, err := i.History(days) if err != nil { fmt.Println(err) os.Exit(1) @@ -53,7 +51,7 @@ func history(cmd *cobra.Command, args []string) { printDailyStats(stats) } -func printDailyStats(stats []*rdb.DailyStats) { +func printDailyStats(stats []*asynq.DailyStats) { format := strings.Repeat("%v\t", 4) + "\n" tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0) fmt.Fprintf(tw, format, "Date (UTC)", "Processed", "Failed", "Error Rate") diff --git a/tools/asynq/cmd/kill.go b/tools/asynq/cmd/kill.go index a2cc438..0ca4a03 100644 --- a/tools/asynq/cmd/kill.go +++ b/tools/asynq/cmd/kill.go @@ -8,15 +8,14 @@ import ( "fmt" "os" - "github.com/go-redis/redis/v7" - "github.com/hibiken/asynq/internal/rdb" + "github.com/hibiken/asynq" "github.com/spf13/cobra" "github.com/spf13/viper" ) // killCmd represents the kill command var killCmd = &cobra.Command{ - Use: "kill [task id]", + Use: "kill [task key]", 
Short: "Kills a task given an identifier", Long: `Kill (asynq kill) will put a task in dead state given an identifier. @@ -44,25 +43,12 @@ func init() { } func kill(cmd *cobra.Command, args []string) { - id, score, qtype, err := parseQueryID(args[0]) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - r := rdb.NewRDB(redis.NewClient(&redis.Options{ + i := asynq.NewInspector(asynq.RedisClientOpt{ Addr: viper.GetString("uri"), DB: viper.GetInt("db"), Password: viper.GetString("password"), - })) - switch qtype { - case "s": - err = r.KillScheduledTask(id, score) - case "r": - err = r.KillRetryTask(id, score) - default: - fmt.Println("invalid argument") - os.Exit(1) - } + }) + err := i.KillTaskByKey(args[0]) if err != nil { fmt.Println(err) os.Exit(1) diff --git a/tools/asynq/cmd/killall.go b/tools/asynq/cmd/killall.go index ab6bb13..bff4740 100644 --- a/tools/asynq/cmd/killall.go +++ b/tools/asynq/cmd/killall.go @@ -8,8 +8,7 @@ import ( "fmt" "os" - "github.com/go-redis/redis/v7" - "github.com/hibiken/asynq/internal/rdb" + "github.com/hibiken/asynq" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -45,19 +44,20 @@ func init() { } func killall(cmd *cobra.Command, args []string) { - c := redis.NewClient(&redis.Options{ + i := asynq.NewInspector(asynq.RedisClientOpt{ Addr: viper.GetString("uri"), DB: viper.GetInt("db"), Password: viper.GetString("password"), }) - r := rdb.NewRDB(c) - var n int64 - var err error + var ( + n int + err error + ) switch args[0] { case "scheduled": - n, err = r.KillAllScheduledTasks() + n, err = i.KillAllScheduledTasks() case "retry": - n, err = r.KillAllRetryTasks() + n, err = i.KillAllRetryTasks() default: fmt.Printf("error: `asynq killall [state]` only accepts %v as the argument.\n", killallValidArgs) os.Exit(1) diff --git a/tools/asynq/cmd/ls.go b/tools/asynq/cmd/ls.go index ad0cfd2..7ec296b 100644 --- a/tools/asynq/cmd/ls.go +++ b/tools/asynq/cmd/ls.go @@ -8,13 +8,10 @@ import ( "fmt" "io" "os" - "strconv" "strings" "time" - 
"github.com/go-redis/redis/v7" - "github.com/google/uuid" - "github.com/hibiken/asynq/internal/rdb" + "github.com/hibiken/asynq" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -62,12 +59,11 @@ func ls(cmd *cobra.Command, args []string) { fmt.Println("page number cannot be negative.") os.Exit(1) } - c := redis.NewClient(&redis.Options{ + i := asynq.NewInspector(asynq.RedisClientOpt{ Addr: viper.GetString("uri"), DB: viper.GetInt("db"), Password: viper.GetString("password"), }) - r := rdb.NewRDB(c) parts := strings.Split(args[0], ":") switch parts[0] { case "enqueued": @@ -75,54 +71,23 @@ func ls(cmd *cobra.Command, args []string) { fmt.Printf("error: Missing queue name\n`asynq ls enqueued:[queue name]`\n") os.Exit(1) } - listEnqueued(r, parts[1]) + listEnqueued(i, parts[1]) case "inprogress": - listInProgress(r) + listInProgress(i) case "scheduled": - listScheduled(r) + listScheduled(i) case "retry": - listRetry(r) + listRetry(i) case "dead": - listDead(r) + listDead(i) default: fmt.Printf("error: `asynq ls [state]`\nonly accepts %v as the argument.\n", lsValidArgs) os.Exit(1) } } -// queryID returns an identifier used for "enq" command. -// score is the zset score and queryType should be one -// of "s", "r" or "d" (scheduled, retry, dead respectively). -func queryID(id uuid.UUID, score int64, qtype string) string { - const format = "%v:%v:%v" - return fmt.Sprintf(format, qtype, score, id) -} - -// parseQueryID is a reverse operation of queryID function. -// It takes a queryID and return each part of id with proper -// type if valid, otherwise it reports an error. 
-func parseQueryID(queryID string) (id uuid.UUID, score int64, qtype string, err error) { - parts := strings.Split(queryID, ":") - if len(parts) != 3 { - return uuid.Nil, 0, "", fmt.Errorf("invalid id") - } - id, err = uuid.Parse(parts[2]) - if err != nil { - return uuid.Nil, 0, "", fmt.Errorf("invalid id") - } - score, err = strconv.ParseInt(parts[1], 10, 64) - if err != nil { - return uuid.Nil, 0, "", fmt.Errorf("invalid id") - } - qtype = parts[0] - if len(qtype) != 1 || !strings.Contains("srd", qtype) { - return uuid.Nil, 0, "", fmt.Errorf("invalid id") - } - return id, score, qtype, nil -} - -func listEnqueued(r *rdb.RDB, qname string) { - tasks, err := r.ListEnqueued(qname, rdb.Pagination{Size: pageSize, Page: pageNum}) +func listEnqueued(i *asynq.Inspector, qname string) { + tasks, err := i.ListEnqueuedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum)) if err != nil { fmt.Println(err) os.Exit(1) @@ -132,17 +97,16 @@ func listEnqueued(r *rdb.RDB, qname string) { return } cols := []string{"ID", "Type", "Payload", "Queue"} - printRows := func(w io.Writer, tmpl string) { + printTable(cols, func(w io.Writer, tmpl string) { for _, t := range tasks { fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload, t.Queue) } - } - printTable(cols, printRows) + }) fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum) } -func listInProgress(r *rdb.RDB) { - tasks, err := r.ListInProgress(rdb.Pagination{Size: pageSize, Page: pageNum}) +func listInProgress(i *asynq.Inspector) { + tasks, err := i.ListInProgressTasks(asynq.PageSize(pageSize), asynq.Page(pageNum)) if err != nil { fmt.Println(err) os.Exit(1) @@ -152,17 +116,16 @@ func listInProgress(r *rdb.RDB) { return } cols := []string{"ID", "Type", "Payload"} - printRows := func(w io.Writer, tmpl string) { + printTable(cols, func(w io.Writer, tmpl string) { for _, t := range tasks { fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload) } - } - printTable(cols, printRows) + }) fmt.Printf("\nShowing %d tasks from page %d\n", 
len(tasks), pageNum) } -func listScheduled(r *rdb.RDB) { - tasks, err := r.ListScheduled(rdb.Pagination{Size: pageSize, Page: pageNum}) +func listScheduled(i *asynq.Inspector) { + tasks, err := i.ListScheduledTasks(asynq.PageSize(pageSize), asynq.Page(pageNum)) if err != nil { fmt.Println(err) os.Exit(1) @@ -171,19 +134,19 @@ func listScheduled(r *rdb.RDB) { fmt.Println("No scheduled tasks") return } - cols := []string{"ID", "Type", "Payload", "Process In", "Queue"} - printRows := func(w io.Writer, tmpl string) { + cols := []string{"Key", "Type", "Payload", "Process In", "Queue"} + printTable(cols, func(w io.Writer, tmpl string) { for _, t := range tasks { - processIn := fmt.Sprintf("%.0f seconds", t.ProcessAt.Sub(time.Now()).Seconds()) - fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "s"), t.Type, t.Payload, processIn, t.Queue) + processIn := fmt.Sprintf("%.0f seconds", + t.NextEnqueueAt.Sub(time.Now()).Seconds()) + fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, processIn, t.Queue) } - } - printTable(cols, printRows) + }) fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum) } -func listRetry(r *rdb.RDB) { - tasks, err := r.ListRetry(rdb.Pagination{Size: pageSize, Page: pageNum}) +func listRetry(i *asynq.Inspector) { + tasks, err := i.ListRetryTasks(asynq.PageSize(pageSize), asynq.Page(pageNum)) if err != nil { fmt.Println(err) os.Exit(1) @@ -192,24 +155,23 @@ func listRetry(r *rdb.RDB) { fmt.Println("No retry tasks") return } - cols := []string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Retried", "Max Retry", "Queue"} - printRows := func(w io.Writer, tmpl string) { + cols := []string{"Key", "Type", "Payload", "Next Retry", "Last Error", "Retried", "Max Retry", "Queue"} + printTable(cols, func(w io.Writer, tmpl string) { for _, t := range tasks { var nextRetry string - if d := t.ProcessAt.Sub(time.Now()); d > 0 { + if d := t.NextEnqueueAt.Sub(time.Now()); d > 0 { nextRetry = fmt.Sprintf("in %v", d.Round(time.Second)) } else { nextRetry = 
"right now" } - fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "r"), t.Type, t.Payload, nextRetry, t.ErrorMsg, t.Retried, t.Retry, t.Queue) + fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, nextRetry, t.ErrorMsg, t.Retried, t.MaxRetry, t.Queue) } - } - printTable(cols, printRows) + }) fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum) } -func listDead(r *rdb.RDB) { - tasks, err := r.ListDead(rdb.Pagination{Size: pageSize, Page: pageNum}) +func listDead(i *asynq.Inspector) { + tasks, err := i.ListDeadTasks(asynq.PageSize(pageSize), asynq.Page(pageNum)) if err != nil { fmt.Println(err) os.Exit(1) @@ -218,12 +180,11 @@ func listDead(r *rdb.RDB) { fmt.Println("No dead tasks") return } - cols := []string{"ID", "Type", "Payload", "Last Failed", "Last Error", "Queue"} - printRows := func(w io.Writer, tmpl string) { + cols := []string{"Key", "Type", "Payload", "Last Failed", "Last Error", "Queue"} + printTable(cols, func(w io.Writer, tmpl string) { for _, t := range tasks { - fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "d"), t.Type, t.Payload, t.LastFailedAt, t.ErrorMsg, t.Queue) + fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, t.LastFailedAt, t.ErrorMsg, t.Queue) } - } - printTable(cols, printRows) + }) fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum) } diff --git a/tools/go.sum b/tools/go.sum index 537848c..dd4c52b 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -166,6 +166,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools 
v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=