
Rename InProgress to Active

Ken Hibino 2020-09-05 12:43:15 -07:00
parent ebe3c4083f
commit a891ce5568
17 changed files with 304 additions and 305 deletions


@@ -37,12 +37,12 @@ type QueueStats struct {
     // Name of the queue.
     Queue string
     // Size is the total number of tasks in the queue.
-    // The value is the sum of Pending, InProgress, Scheduled, Retry, and Dead.
+    // The value is the sum of Pending, Active, Scheduled, Retry, and Dead.
     Size int
     // Number of pending tasks.
     Pending int
-    // Number of in-progress tasks.
-    InProgress int
+    // Number of active tasks.
+    Active int
     // Number of scheduled tasks.
     Scheduled int
     // Number of retry tasks.
@@ -71,17 +71,17 @@ func (i *Inspector) CurrentStats(qname string) (*QueueStats, error) {
         return nil, err
     }
     return &QueueStats{
         Queue: stats.Queue,
         Size: stats.Size,
         Pending: stats.Pending,
-        InProgress: stats.InProgress,
+        Active: stats.Active,
         Scheduled: stats.Scheduled,
         Retry: stats.Retry,
         Dead: stats.Dead,
         Processed: stats.Processed,
         Failed: stats.Failed,
         Paused: stats.Paused,
         Timestamp: stats.Timestamp,
     }, nil
 }
@@ -126,8 +126,8 @@ type PendingTask struct {
     Queue string
 }

-// InProgressTask is a task that's currently being processed.
-type InProgressTask struct {
+// ActiveTask is a task that's currently being processed.
+type ActiveTask struct {
     *Task
     ID string
     Queue string
@@ -293,22 +293,22 @@ func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*Pendi
     return tasks, err
 }

-// ListInProgressTasks retrieves in-progress tasks from the specified queue.
+// ListActiveTasks retrieves active tasks from the specified queue.
 //
 // By default, it retrieves the first 30 tasks.
-func (i *Inspector) ListInProgressTasks(qname string, opts ...ListOption) ([]*InProgressTask, error) {
+func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*ActiveTask, error) {
     if err := validateQueueName(qname); err != nil {
         return nil, err
     }
     opt := composeListOptions(opts...)
     pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
-    msgs, err := i.rdb.ListInProgress(qname, pgn)
+    msgs, err := i.rdb.ListActive(qname, pgn)
     if err != nil {
         return nil, err
     }
-    var tasks []*InProgressTask
+    var tasks []*ActiveTask
     for _, m := range msgs {
-        tasks = append(tasks, &InProgressTask{
+        tasks = append(tasks, &ActiveTask{
             Task: NewTask(m.Type, m.Payload),
             ID: m.ID.String(),
             Queue: m.Queue,
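
The renamed inspector API reads naturally at call sites. A minimal usage sketch, assuming the Inspector lives in the public asynq package and accepts a RedisClientOpt; only NewInspector, ListActiveTasks, and the ActiveTask fields are confirmed by this diff:

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    // The RedisClientOpt literal and address are assumptions; the diff
    // only shows that NewInspector takes a Redis connection option.
    inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})

    // ListActiveTasks replaces ListInProgressTasks and, per its doc
    // comment, returns the first 30 tasks by default.
    tasks, err := inspector.ListActiveTasks("default")
    if err != nil {
        log.Fatal(err)
    }
    for _, task := range tasks {
        // ID and Queue are ActiveTask fields; Type comes from the embedded *Task.
        fmt.Printf("id=%s type=%s queue=%s\n", task.ID, task.Type, task.Queue)
    }
}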


@@ -114,17 +114,17 @@ func TestInspectorCurrentStats(t *testing.T) {
         },
         qname: "default",
         want: &QueueStats{
             Queue: "default",
             Size: 4,
             Pending: 1,
-            InProgress: 1,
+            Active: 1,
             Scheduled: 2,
             Retry: 0,
             Dead: 0,
             Processed: 120,
             Failed: 2,
             Paused: false,
             Timestamp: now,
         },
     },
 }
@@ -132,7 +132,7 @@ func TestInspectorCurrentStats(t *testing.T) {
 for _, tc := range tests {
     asynqtest.FlushDB(t, r)
     asynqtest.SeedAllPendingQueues(t, r, tc.pending)
-    asynqtest.SeedAllInProgressQueues(t, r, tc.inProgress)
+    asynqtest.SeedAllActiveQueues(t, r, tc.inProgress)
     asynqtest.SeedAllScheduledQueues(t, r, tc.scheduled)
     asynqtest.SeedAllRetryQueues(t, r, tc.retry)
     asynqtest.SeedAllDeadQueues(t, r, tc.dead)
@@ -291,7 +291,7 @@ func TestInspectorListPendingTasks(t *testing.T) {
     }
 }

-func TestInspectorListInProgressTasks(t *testing.T) {
+func TestInspectorListActiveTasks(t *testing.T) {
     r := setup(t)
     m1 := asynqtest.NewTaskMessage("task1", nil)
     m2 := asynqtest.NewTaskMessage("task2", nil)
@@ -300,8 +300,8 @@ func TestInspectorListInProgressTasks(t *testing.T) {
 inspector := NewInspector(getRedisConnOpt(t))

-createInProgressTask := func(msg *base.TaskMessage) *InProgressTask {
-    return &InProgressTask{
+createActiveTask := func(msg *base.TaskMessage) *ActiveTask {
+    return &ActiveTask{
         Task: NewTask(msg.Type, msg.Payload),
         ID: msg.ID.String(),
         Queue: msg.Queue,
@@ -312,34 +312,34 @@ func TestInspectorListInProgressTasks(t *testing.T) {
     desc string
     inProgress map[string][]*base.TaskMessage
     qname string
-    want []*InProgressTask
+    want []*ActiveTask
 }{
     {
-        desc: "with a few in-progress tasks",
+        desc: "with a few active tasks",
         inProgress: map[string][]*base.TaskMessage{
             "default": {m1, m2},
             "custom": {m3, m4},
         },
         qname: "default",
-        want: []*InProgressTask{
-            createInProgressTask(m1),
-            createInProgressTask(m2),
+        want: []*ActiveTask{
+            createActiveTask(m1),
+            createActiveTask(m2),
         },
     },
 }

 for _, tc := range tests {
     asynqtest.FlushDB(t, r)
-    asynqtest.SeedAllInProgressQueues(t, r, tc.inProgress)
+    asynqtest.SeedAllActiveQueues(t, r, tc.inProgress)

-    got, err := inspector.ListInProgressTasks(tc.qname)
+    got, err := inspector.ListActiveTasks(tc.qname)
     if err != nil {
-        t.Errorf("%s; ListInProgressTasks(%q) returned error: %v", tc.qname, tc.desc, err)
+        t.Errorf("%s; ListActiveTasks(%q) returned error: %v", tc.qname, tc.desc, err)
         continue
     }
     ignoreOpt := cmpopts.IgnoreUnexported(Payload{})
     if diff := cmp.Diff(tc.want, got, ignoreOpt); diff != "" {
-        t.Errorf("%s; ListInProgressTask(%q) = %v, want %v; (-want,+got)\n%s",
+        t.Errorf("%s; ListActiveTask(%q) = %v, want %v; (-want,+got)\n%s",
             tc.desc, tc.qname, got, tc.want, diff)
     }
 }


@@ -181,11 +181,11 @@ func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskM
     seedRedisList(tb, r, base.QueueKey(qname), msgs)
 }

-// SeedInProgressQueue initializes the in-progress queue with the given messages.
-func SeedInProgressQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
+// SeedActiveQueue initializes the active queue with the given messages.
+func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
     tb.Helper()
     r.SAdd(base.AllQueues, qname)
-    seedRedisList(tb, r, base.InProgressKey(qname), msgs)
+    seedRedisList(tb, r, base.ActiveKey(qname), msgs)
 }

 // SeedScheduledQueue initializes the scheduled queue with the given messages.
@@ -225,10 +225,10 @@ func SeedAllPendingQueues(tb testing.TB, r redis.UniversalClient, pending map[st
     }
 }

-// SeedAllInProgressQueues initializes all of the specified in-progress queues with the given messages.
-func SeedAllInProgressQueues(tb testing.TB, r redis.UniversalClient, inprogress map[string][]*base.TaskMessage) {
-    for q, msgs := range inprogress {
-        SeedInProgressQueue(tb, r, msgs, q)
+// SeedAllActiveQueues initializes all of the specified active queues with the given messages.
+func SeedAllActiveQueues(tb testing.TB, r redis.UniversalClient, active map[string][]*base.TaskMessage) {
+    for q, msgs := range active {
+        SeedActiveQueue(tb, r, msgs, q)
     }
 }
@@ -284,10 +284,10 @@ func GetPendingMessages(tb testing.TB, r redis.UniversalClient, qname string) []
     return getListMessages(tb, r, base.QueueKey(qname))
 }

-// GetInProgressMessages returns all in-progress messages in the given queue.
-func GetInProgressMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+// GetActiveMessages returns all active messages in the given queue.
+func GetActiveMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
     tb.Helper()
-    return getListMessages(tb, r, base.InProgressKey(qname))
+    return getListMessages(tb, r, base.ActiveKey(qname))
 }

 // GetScheduledMessages returns all scheduled task messages in the given queue.
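
A sketch of how the renamed test helpers compose, modeled on the tests elsewhere in this commit; the test name is hypothetical, and setup is the helper the surrounding test files already use to obtain a Redis client:

func TestSeedAndGetActive(t *testing.T) {
    r := setup(t) // returns a redis.UniversalClient in the package's tests

    msgs := []*base.TaskMessage{
        asynqtest.NewTaskMessage("task1", nil),
        asynqtest.NewTaskMessage("task2", nil),
    }

    // SeedActiveQueue registers "default" in base.AllQueues and pushes
    // the messages onto the asynq:{default}:active list.
    asynqtest.SeedActiveQueue(t, r, msgs, "default")

    // GetActiveMessages reads that same list back.
    if got := asynqtest.GetActiveMessages(t, r, "default"); len(got) != 2 {
        t.Errorf("got %d active messages, want 2", len(got))
    }
}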


@@ -40,10 +40,9 @@ func QueueKey(qname string) string {
     return fmt.Sprintf("asynq:{%s}", qname)
 }

-// TODO: Should we rename this to "active"?
-// InProgressKey returns a redis key for the in-progress tasks.
-func InProgressKey(qname string) string {
-    return fmt.Sprintf("asynq:{%s}:in_progress", qname)
+// ActiveKey returns a redis key for the active tasks.
+func ActiveKey(qname string) string {
+    return fmt.Sprintf("asynq:{%s}:active", qname)
 }

 // ScheduledKey returns a redis key for the scheduled tasks.
@@ -274,7 +273,7 @@ type WorkerInfo struct {
     Started time.Time
 }

-// Cancelations is a collection that holds cancel functions for all in-progress tasks.
+// Cancelations is a collection that holds cancel functions for all active tasks.
 //
 // Cancelations are safe for concurrent use by multiple goroutines.
 type Cancelations struct {
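
One design note on the key formats above: the braces around the queue name form a Redis Cluster hash tag, so every key belonging to one queue hashes to the same slot and the multi-key Lua scripts later in this commit remain valid in cluster mode. The renamed helper simply swaps the suffix:

// Quoted from the diff above; only the key suffix changed.
func ActiveKey(qname string) string {
    return fmt.Sprintf("asynq:{%s}:active", qname)
}

// ActiveKey("default") == "asynq:{default}:active"   (was "asynq:{default}:in_progress")
// ActiveKey("critical") == "asynq:{critical}:active"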


@@ -32,19 +32,19 @@ func TestQueueKey(t *testing.T) {
     }
 }

-func TestInProgressKey(t *testing.T) {
+func TestActiveKey(t *testing.T) {
     tests := []struct {
         qname string
         want string
     }{
-        {"default", "asynq:{default}:in_progress"},
-        {"custom", "asynq:{custom}:in_progress"},
+        {"default", "asynq:{default}:active"},
+        {"custom", "asynq:{custom}:active"},
     }

     for _, tc := range tests {
-        got := InProgressKey(tc.qname)
+        got := ActiveKey(tc.qname)
         if got != tc.want {
-            t.Errorf("InProgressKey(%q) = %q, want %q", tc.qname, got, tc.want)
+            t.Errorf("ActiveKey(%q) = %q, want %q", tc.qname, got, tc.want)
         }
     }
 }


@@ -31,11 +31,11 @@ type Stats struct {
     // Size is the total number of tasks in the queue.
     Size int
     // Number of tasks in each state.
     Pending int
-    InProgress int
+    Active int
     Scheduled int
     Retry int
     Dead int
     // Total number of tasks processed during the current date.
     // The number includes both succeeded and failed tasks.
     Processed int
@@ -59,7 +59,7 @@ type DailyStats struct {
 }

 // KEYS[1] -> asynq:<qname>
-// KEYS[2] -> asynq:<qname>:in_progress
+// KEYS[2] -> asynq:<qname>:active
 // KEYS[3] -> asynq:<qname>:scheduled
 // KEYS[4] -> asynq:<qname>:retry
 // KEYS[5] -> asynq:<qname>:dead
@@ -108,7 +108,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
     now := time.Now()
     res, err := currentStatsCmd.Run(r.client, []string{
         base.QueueKey(qname),
-        base.InProgressKey(qname),
+        base.ActiveKey(qname),
         base.ScheduledKey(qname),
         base.RetryKey(qname),
         base.DeadKey(qname),
@@ -135,8 +135,8 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
     case base.QueueKey(qname):
         stats.Pending = val
         size += val
-    case base.InProgressKey(qname):
-        stats.InProgress = val
+    case base.ActiveKey(qname):
+        stats.Active = val
         size += val
     case base.ScheduledKey(qname):
         stats.Scheduled = val
@@ -266,12 +266,12 @@ func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, er
     return r.listMessages(base.QueueKey(qname), pgn)
 }

-// ListInProgress returns all tasks that are currently being processed for the given queue.
-func (r *RDB) ListInProgress(qname string, pgn Pagination) ([]*base.TaskMessage, error) {
+// ListActive returns all tasks that are currently being processed for the given queue.
+func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskMessage, error) {
     if !r.client.SIsMember(base.AllQueues, qname).Val() {
         return nil, fmt.Errorf("queue %q does not exist", qname)
     }
-    return r.listMessages(base.InProgressKey(qname), pgn)
+    return r.listMessages(base.ActiveKey(qname), pgn)
 }

 // listMessages returns a list of TaskMessage in Redis list with the given key.
@@ -652,17 +652,17 @@ func (e *ErrQueueNotEmpty) Error() string {
     return fmt.Sprintf("queue %q is not empty", e.qname)
 }

-// Only check whether in-progress queue is empty before removing.
+// Only check whether active queue is empty before removing.
 // KEYS[1] -> asynq:{<qname>}
-// KEYS[2] -> asynq:{<qname>}:in_progress
+// KEYS[2] -> asynq:{<qname>}:active
 // KEYS[3] -> asynq:{<qname>}:scheduled
 // KEYS[4] -> asynq:{<qname>}:retry
 // KEYS[5] -> asynq:{<qname>}:dead
 // KEYS[6] -> asynq:{<qname>}:deadlines
 var removeQueueForceCmd = redis.NewScript(`
-local inprogress = redis.call("LLEN", KEYS[2])
-if inprogress > 0 then
-return redis.error_reply("Queue has tasks in-progress")
+local active = redis.call("LLEN", KEYS[2])
+if active > 0 then
+return redis.error_reply("Queue has tasks active")
 end
 redis.call("DEL", KEYS[1])
 redis.call("DEL", KEYS[2])
@@ -674,18 +674,18 @@ return redis.status_reply("OK")`)
 // Checks whether queue is empty before removing.
 // KEYS[1] -> asynq:{<qname>}
-// KEYS[2] -> asynq:{<qname>}:in_progress
+// KEYS[2] -> asynq:{<qname>}:active
 // KEYS[3] -> asynq:{<qname>}:scheduled
 // KEYS[4] -> asynq:{<qname>}:retry
 // KEYS[5] -> asynq:{<qname>}:dead
 // KEYS[6] -> asynq:{<qname>}:deadlines
 var removeQueueCmd = redis.NewScript(`
 local pending = redis.call("LLEN", KEYS[1])
-local inprogress = redis.call("LLEN", KEYS[2])
+local active = redis.call("LLEN", KEYS[2])
 local scheduled = redis.call("SCARD", KEYS[3])
 local retry = redis.call("SCARD", KEYS[4])
 local dead = redis.call("SCARD", KEYS[5])
-local total = pending + inprogress + scheduled + retry + dead
+local total = pending + active + scheduled + retry + dead
 if total > 0 then
 return redis.error_reply("QUEUE NOT EMPTY")
 end
@@ -700,7 +700,7 @@ return redis.status_reply("OK")`)
 // RemoveQueue removes the specified queue.
 //
 // If force is set to true, it will remove the queue regardless
-// as long as no tasks are in-progress for the queue.
+// as long as no tasks are active for the queue.
 // If force is set to false, it will only remove the queue if
 // the queue is empty.
 func (r *RDB) RemoveQueue(qname string, force bool) error {
@@ -719,7 +719,7 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
     }
     keys := []string{
         base.QueueKey(qname),
-        base.InProgressKey(qname),
+        base.ActiveKey(qname),
         base.ScheduledKey(qname),
         base.RetryKey(qname),
         base.DeadKey(qname),
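
Since the doc comments earlier in this commit define Size as the sum of the per-state counts, the rename preserves a simple invariant over Stats. A toy check; the helper name is hypothetical:

// sizeIsConsistent verifies the documented invariant that Size equals
// the sum of the per-state counts, now including Active.
func sizeIsConsistent(s *Stats) bool {
    return s.Size == s.Pending+s.Active+s.Scheduled+s.Retry+s.Dead
}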


@@ -110,17 +110,17 @@ func TestCurrentStats(t *testing.T) {
     paused: []string{},
     qname: "default",
     want: &Stats{
         Queue: "default",
         Paused: false,
         Size: 4,
         Pending: 1,
-        InProgress: 1,
+        Active: 1,
         Scheduled: 2,
         Retry: 0,
         Dead: 0,
         Processed: 120,
         Failed: 2,
         Timestamp: now,
     },
 },
 {
@@ -165,17 +165,17 @@ func TestCurrentStats(t *testing.T) {
     paused: []string{"critical", "low"},
     qname: "critical",
     want: &Stats{
         Queue: "critical",
         Paused: true,
         Size: 1,
         Pending: 1,
-        InProgress: 0,
+        Active: 0,
         Scheduled: 0,
         Retry: 0,
         Dead: 0,
         Processed: 100,
         Failed: 0,
         Timestamp: now,
     },
 },
 }
@@ -188,7 +188,7 @@ func TestCurrentStats(t *testing.T) {
         }
     }
     h.SeedAllPendingQueues(t, r.client, tc.pending)
-    h.SeedAllInProgressQueues(t, r.client, tc.inProgress)
+    h.SeedAllActiveQueues(t, r.client, tc.inProgress)
     h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
     h.SeedAllRetryQueues(t, r.client, tc.retry)
     h.SeedAllDeadQueues(t, r.client, tc.dead)
@@ -433,7 +433,7 @@ func TestListPendingPagination(t *testing.T) {
     }
 }

-func TestListInProgress(t *testing.T) {
+func TestListActive(t *testing.T) {
     r := setup(t)

     m1 := h.NewTaskMessage("task1", nil)
@@ -466,10 +466,10 @@ func TestListInProgress(t *testing.T) {
 for _, tc := range tests {
     h.FlushDB(t, r.client) // clean up db before each test case
-    h.SeedAllInProgressQueues(t, r.client, tc.inProgress)
+    h.SeedAllActiveQueues(t, r.client, tc.inProgress)

-    got, err := r.ListInProgress(tc.qname, Pagination{Size: 20, Page: 0})
-    op := fmt.Sprintf("r.ListInProgress(%q, Pagination{Size: 20, Page: 0})", tc.qname)
+    got, err := r.ListActive(tc.qname, Pagination{Size: 20, Page: 0})
+    op := fmt.Sprintf("r.ListActive(%q, Pagination{Size: 20, Page: 0})", tc.qname)
     if err != nil {
         t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.inProgress)
         continue
@@ -481,14 +481,14 @@ func TestListInProgress(t *testing.T) {
     }
 }

-func TestListInProgressPagination(t *testing.T) {
+func TestListActivePagination(t *testing.T) {
     r := setup(t)
     var msgs []*base.TaskMessage
     for i := 0; i < 100; i++ {
         msg := h.NewTaskMessage(fmt.Sprintf("task %d", i), nil)
         msgs = append(msgs, msg)
     }
-    h.SeedInProgressQueue(t, r.client, msgs, "default")
+    h.SeedActiveQueue(t, r.client, msgs, "default")

     tests := []struct {
         desc string
@@ -507,8 +507,8 @@ func TestListInProgressPagination(t *testing.T) {
 }

 for _, tc := range tests {
-    got, err := r.ListInProgress(tc.qname, Pagination{Size: tc.size, Page: tc.page})
-    op := fmt.Sprintf("r.ListInProgress(%q, Pagination{Size: %d, Page: %d})", tc.qname, tc.size, tc.page)
+    got, err := r.ListActive(tc.qname, Pagination{Size: tc.size, Page: tc.page})
+    op := fmt.Sprintf("r.ListActive(%q, Pagination{Size: %d, Page: %d})", tc.qname, tc.size, tc.page)
     if err != nil {
         t.Errorf("%s; %s returned error %v", tc.desc, op, err)
         continue
@@ -2669,7 +2669,7 @@ func TestRemoveQueue(t *testing.T) {
 for _, tc := range tests {
     h.FlushDB(t, r.client)
     h.SeedAllPendingQueues(t, r.client, tc.pending)
-    h.SeedAllInProgressQueues(t, r.client, tc.inProgress)
+    h.SeedAllActiveQueues(t, r.client, tc.inProgress)
     h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
     h.SeedAllRetryQueues(t, r.client, tc.retry)
     h.SeedAllDeadQueues(t, r.client, tc.dead)
@@ -2686,7 +2686,7 @@ func TestRemoveQueue(t *testing.T) {
     keys := []string{
         base.QueueKey(tc.qname),
-        base.InProgressKey(tc.qname),
+        base.ActiveKey(tc.qname),
         base.DeadlinesKey(tc.qname),
         base.ScheduledKey(tc.qname),
         base.RetryKey(tc.qname),
@@ -2768,7 +2768,7 @@ func TestRemoveQueueError(t *testing.T) {
     force: false,
 },
 {
-    desc: "force removing queue with tasks in-progress",
+    desc: "force removing queue with active tasks",
     pending: map[string][]*base.TaskMessage{
         "default": {m1, m2},
         "custom": {m3},
@@ -2790,7 +2790,7 @@ func TestRemoveQueueError(t *testing.T) {
         "custom": {},
     },
     qname: "custom",
-    // Even with force=true, it should error if there are tasks in-progress.
+    // Even with force=true, it should error if there are active tasks.
     force: true,
 },
 }
@@ -2798,7 +2798,7 @@ func TestRemoveQueueError(t *testing.T) {
 for _, tc := range tests {
     h.FlushDB(t, r.client)
     h.SeedAllPendingQueues(t, r.client, tc.pending)
-    h.SeedAllInProgressQueues(t, r.client, tc.inProgress)
+    h.SeedAllActiveQueues(t, r.client, tc.inProgress)
     h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
     h.SeedAllRetryQueues(t, r.client, tc.retry)
     h.SeedAllDeadQueues(t, r.client, tc.dead)
@@ -2817,9 +2817,9 @@ func TestRemoveQueueError(t *testing.T) {
     }
 }
 for qname, want := range tc.inProgress {
-    gotInProgress := h.GetInProgressMessages(t, r.client, qname)
-    if diff := cmp.Diff(want, gotInProgress, h.SortMsgOpt); diff != "" {
-        t.Errorf("%s;mismatch found in %q; (-want,+got):\n%s", tc.desc, base.InProgressKey(qname), diff)
+    gotActive := h.GetActiveMessages(t, r.client, qname)
+    if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+        t.Errorf("%s;mismatch found in %q; (-want,+got):\n%s", tc.desc, base.ActiveKey(qname), diff)
     }
 }
 for qname, want := range tc.scheduled {


@@ -120,7 +120,7 @@ func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Ti
 // KEYS[1] -> asynq:{<qname>}
 // KEYS[2] -> asynq:{<qname>}:paused
-// KEYS[3] -> asynq:{<qname>}:in_progress
+// KEYS[3] -> asynq:{<qname>}:active
 // KEYS[4] -> asynq:{<qname>}:deadlines
 // ARGV[1] -> current time in Unix time
 //
@@ -156,7 +156,7 @@ func (r *RDB) dequeue(qnames ...string) (msgjson string, deadline int64, err err
     keys := []string{
         base.QueueKey(qname),
         base.PausedKey(qname),
-        base.InProgressKey(qname),
+        base.ActiveKey(qname),
         base.DeadlinesKey(qname),
     }
     res, err := dequeueCmd.Run(r.client, keys, time.Now().Unix()).Result()
@@ -183,7 +183,7 @@ func (r *RDB) dequeue(qnames ...string) (msgjson string, deadline int64, err err
     return "", 0, ErrNoProcessableTask
 }

-// KEYS[1] -> asynq:{<qname>}:in_progress
+// KEYS[1] -> asynq:{<qname>}:active
 // KEYS[2] -> asynq:{<qname>}:deadlines
 // KEYS[3] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
 // ARGV[1] -> base.TaskMessage value
@@ -202,7 +202,7 @@ end
 return redis.status_reply("OK")
 `)

-// KEYS[1] -> asynq:{<qname>}:in_progress
+// KEYS[1] -> asynq:{<qname>}:active
 // KEYS[2] -> asynq:{<qname>}:deadlines
 // KEYS[3] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
 // KEYS[4] -> unique key
@@ -226,7 +226,7 @@ end
 return redis.status_reply("OK")
 `)

-// Done removes the task from in-progress queue to mark the task as done.
+// Done removes the task from active queue to mark the task as done.
 // It removes a uniqueness lock acquired by the task, if any.
 func (r *RDB) Done(msg *base.TaskMessage) error {
     encoded, err := base.EncodeMessage(msg)
@@ -236,7 +236,7 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
     now := time.Now()
     expireAt := now.Add(statsTTL)
     keys := []string{
-        base.InProgressKey(msg.Queue),
+        base.ActiveKey(msg.Queue),
         base.DeadlinesKey(msg.Queue),
         base.ProcessedKey(msg.Queue, now),
     }
@@ -249,7 +249,7 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
     return doneCmd.Run(r.client, keys, args...).Err()
 }

-// KEYS[1] -> asynq:{<qname>}:in_progress
+// KEYS[1] -> asynq:{<qname>}:active
 // KEYS[2] -> asynq:{<qname>}:deadlines
 // KEYS[3] -> asynq:{<qname>}
 // ARGV[1] -> base.TaskMessage value
@@ -264,14 +264,14 @@ end
 redis.call("RPUSH", KEYS[3], ARGV[1])
 return redis.status_reply("OK")`)

-// Requeue moves the task from in-progress queue to the specified queue.
+// Requeue moves the task from active queue to the specified queue.
 func (r *RDB) Requeue(msg *base.TaskMessage) error {
     encoded, err := base.EncodeMessage(msg)
     if err != nil {
         return err
     }
     return requeueCmd.Run(r.client,
-        []string{base.InProgressKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.QueueKey(msg.Queue)},
+        []string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.QueueKey(msg.Queue)},
         encoded).Err()
 }
@@ -330,12 +330,12 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
     return nil
 }

-// KEYS[1] -> asynq:{<qname>}:in_progress
+// KEYS[1] -> asynq:{<qname>}:active
 // KEYS[2] -> asynq:{<qname>}:deadlines
 // KEYS[3] -> asynq:{<qname>}:retry
 // KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
 // KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
-// ARGV[1] -> base.TaskMessage value to remove from base.InProgressQueue queue
+// ARGV[1] -> base.TaskMessage value to remove from base.ActiveQueue queue
 // ARGV[2] -> base.TaskMessage value to add to Retry queue
 // ARGV[3] -> retry_at UNIX timestamp
 // ARGV[4] -> stats expiration timestamp
@@ -357,7 +357,7 @@ if tonumber(m) == 1 then
 end
 return redis.status_reply("OK")`)

-// Retry moves the task from in-progress to retry queue, incrementing retry count
+// Retry moves the task from active to retry queue, incrementing retry count
 // and assigning error message to the task message.
 func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
     msgToRemove, err := base.EncodeMessage(msg)
@@ -376,7 +376,7 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) e
     failedKey := base.FailedKey(msg.Queue, now)
     expireAt := now.Add(statsTTL)
     return retryCmd.Run(r.client,
-        []string{base.InProgressKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.RetryKey(msg.Queue), processedKey, failedKey},
+        []string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.RetryKey(msg.Queue), processedKey, failedKey},
         msgToRemove, msgToAdd, processAt.Unix(), expireAt.Unix()).Err()
 }
@@ -385,12 +385,12 @@ const (
     deadExpirationInDays = 90
 )

-// KEYS[1] -> asynq:{<qname>}:in_progress
+// KEYS[1] -> asynq:{<qname>}:active
 // KEYS[2] -> asynq:{<qname>}:deadlines
 // KEYS[3] -> asynq:{<qname>}:dead
 // KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
 // KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
-// ARGV[1] -> base.TaskMessage value to remove from base.InProgressQueue queue
+// ARGV[1] -> base.TaskMessage value to remove from base.ActiveQueue queue
 // ARGV[2] -> base.TaskMessage value to add to Dead queue
 // ARGV[3] -> died_at UNIX timestamp
 // ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
@@ -416,7 +416,7 @@ if tonumber(m) == 1 then
 end
 return redis.status_reply("OK")`)

-// Kill sends the task to "dead" queue from in-progress queue, assigning
+// Kill sends the task to "dead" queue from active queue, assigning
 // the error message to the task.
 // It also trims the set by timestamp and set size.
 func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
@@ -436,7 +436,7 @@ func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
     failedKey := base.FailedKey(msg.Queue, now)
     expireAt := now.Add(statsTTL)
     return killCmd.Run(r.client,
-        []string{base.InProgressKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.DeadKey(msg.Queue), processedKey, failedKey},
+        []string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.DeadKey(msg.Queue), processedKey, failedKey},
         msgToRemove, msgToAdd, now.Unix(), limit, maxDeadTasks, expireAt.Unix()).Err()
 }
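
Taken together, the scripts in this file move a task through the renamed active list. A hedged sketch of one worker iteration using only RDB methods shown in this diff; runOne, the queue name, and the 30-second backoff are illustrative:

func runOne(r *RDB, process func(*base.TaskMessage) error) error {
    // Dequeue moves a task from the pending list into
    // asynq:{<qname>}:active and records its deadline.
    msg, _, err := r.Dequeue("default")
    if err != nil {
        return err
    }
    if perr := process(msg); perr != nil {
        // Retry removes the task from the active list and parks it in
        // the retry set with a future process-at time and the error.
        return r.Retry(msg, time.Now().Add(30*time.Second), perr.Error())
    }
    // Done removes the task from the active list and its deadline entry.
    return r.Done(msg)
}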


@@ -167,14 +167,14 @@ func TestDequeue(t *testing.T) {
 t3Deadline := now.Unix() + t3.Timeout // use whichever is earliest
 tests := []struct {
     pending map[string][]*base.TaskMessage
     args []string // list of queues to query
     wantMsg *base.TaskMessage
     wantDeadline time.Time
     err error
     wantPending map[string][]*base.TaskMessage
-    wantInProgress map[string][]*base.TaskMessage
+    wantActive map[string][]*base.TaskMessage
     wantDeadlines map[string][]base.Z
 }{
     {
         pending: map[string][]*base.TaskMessage{
@@ -187,7 +187,7 @@ func TestDequeue(t *testing.T) {
 wantPending: map[string][]*base.TaskMessage{
     "default": {},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t1},
 },
 wantDeadlines: map[string][]base.Z{
@@ -205,7 +205,7 @@ func TestDequeue(t *testing.T) {
 wantPending: map[string][]*base.TaskMessage{
     "default": {},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {},
 },
 wantDeadlines: map[string][]base.Z{
@@ -227,7 +227,7 @@ func TestDequeue(t *testing.T) {
     "critical": {},
     "low": {t3},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {},
     "critical": {t2},
     "low": {},
@@ -253,7 +253,7 @@ func TestDequeue(t *testing.T) {
     "critical": {},
     "low": {t2, t1},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t3},
     "critical": {},
     "low": {},
@@ -279,7 +279,7 @@ func TestDequeue(t *testing.T) {
     "critical": {},
     "low": {},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {},
     "critical": {},
     "low": {},
@@ -319,10 +319,10 @@ func TestDequeue(t *testing.T) {
         t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.QueueKey(queue), diff)
     }
 }
-for queue, want := range tc.wantInProgress {
-    gotInProgress := h.GetInProgressMessages(t, r.client, queue)
-    if diff := cmp.Diff(want, gotInProgress, h.SortMsgOpt); diff != "" {
-        t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.InProgressKey(queue), diff)
+for queue, want := range tc.wantActive {
+    gotActive := h.GetActiveMessages(t, r.client, queue)
+    if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+        t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.ActiveKey(queue), diff)
     }
 }
 for queue, want := range tc.wantDeadlines {
@@ -354,13 +354,13 @@ func TestDequeueIgnoresPausedQueues(t *testing.T) {
 }
 tests := []struct {
     paused []string // list of paused queues
     pending map[string][]*base.TaskMessage
     args []string // list of queues to query
     wantMsg *base.TaskMessage
     err error
     wantPending map[string][]*base.TaskMessage
-    wantInProgress map[string][]*base.TaskMessage
+    wantActive map[string][]*base.TaskMessage
 }{
     {
         paused: []string{"default"},
@@ -375,7 +375,7 @@ func TestDequeueIgnoresPausedQueues(t *testing.T) {
     "default": {t1},
     "critical": {},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {},
     "critical": {t2},
 },
@@ -391,7 +391,7 @@ func TestDequeueIgnoresPausedQueues(t *testing.T) {
 wantPending: map[string][]*base.TaskMessage{
     "default": {t1},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {},
 },
 },
@@ -408,7 +408,7 @@ func TestDequeueIgnoresPausedQueues(t *testing.T) {
     "default": {t1},
     "critical": {t2},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {},
     "critical": {},
 },
@@ -437,10 +437,10 @@ func TestDequeueIgnoresPausedQueues(t *testing.T) {
         t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.QueueKey(queue), diff)
     }
 }
-for queue, want := range tc.wantInProgress {
-    gotInProgress := h.GetInProgressMessages(t, r.client, queue)
-    if diff := cmp.Diff(want, gotInProgress, h.SortMsgOpt); diff != "" {
-        t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.InProgressKey(queue), diff)
+for queue, want := range tc.wantActive {
+    gotActive := h.GetActiveMessages(t, r.client, queue)
+    if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+        t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.ActiveKey(queue), diff)
     }
 }
 }
@@ -479,12 +479,12 @@ func TestDone(t *testing.T) {
 t3Deadline := now.Unix() + t3.Deadline
 tests := []struct {
     desc string
-    inProgress map[string][]*base.TaskMessage // initial state of the in-progress list
+    inProgress map[string][]*base.TaskMessage // initial state of the active list
     deadlines map[string][]base.Z // initial state of deadlines set
     target *base.TaskMessage // task to remove
-    wantInProgress map[string][]*base.TaskMessage // final state of the in-progress list
+    wantActive map[string][]*base.TaskMessage // final state of the active list
     wantDeadlines map[string][]base.Z // final state of the deadline set
 }{
     {
         desc: "removes message from the correct queue",
@@ -497,7 +497,7 @@ func TestDone(t *testing.T) {
     "custom": {{Message: t2, Score: t2Deadline}},
 },
 target: t1,
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {},
     "custom": {t2},
 },
@@ -515,7 +515,7 @@ func TestDone(t *testing.T) {
     "default": {{Message: t1, Score: t1Deadline}},
 },
 target: t1,
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {},
 },
 wantDeadlines: map[string][]base.Z{
@@ -533,7 +533,7 @@ func TestDone(t *testing.T) {
     "custom": {{Message: t2, Score: t2Deadline}},
 },
 target: t3,
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t1},
     "custom": {t2},
 },
@@ -547,7 +547,7 @@ func TestDone(t *testing.T) {
 for _, tc := range tests {
     h.FlushDB(t, r.client) // clean up db before each test case
     h.SeedAllDeadlines(t, r.client, tc.deadlines)
-    h.SeedAllInProgressQueues(t, r.client, tc.inProgress)
+    h.SeedAllActiveQueues(t, r.client, tc.inProgress)
     for _, msgs := range tc.inProgress {
         for _, msg := range msgs {
             // Set uniqueness lock if unique key is present.
@@ -566,10 +566,10 @@ func TestDone(t *testing.T) {
     continue
 }
-for queue, want := range tc.wantInProgress {
-    gotInProgress := h.GetInProgressMessages(t, r.client, queue)
-    if diff := cmp.Diff(want, gotInProgress, h.SortMsgOpt); diff != "" {
-        t.Errorf("%s; mismatch found in %q: (-want, +got):\n%s", tc.desc, base.InProgressKey(queue), diff)
+for queue, want := range tc.wantActive {
+    gotActive := h.GetActiveMessages(t, r.client, queue)
+    if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+        t.Errorf("%s; mismatch found in %q: (-want, +got):\n%s", tc.desc, base.ActiveKey(queue), diff)
         continue
     }
 }
@@ -627,13 +627,13 @@ func TestRequeue(t *testing.T) {
 t3Deadline := now.Unix() + t3.Timeout
 tests := []struct {
     pending map[string][]*base.TaskMessage // initial state of queues
-    inProgress map[string][]*base.TaskMessage // initial state of the in-progress list
+    inProgress map[string][]*base.TaskMessage // initial state of the active list
     deadlines map[string][]base.Z // initial state of the deadlines set
     target *base.TaskMessage // task to requeue
     wantPending map[string][]*base.TaskMessage // final state of queues
-    wantInProgress map[string][]*base.TaskMessage // final state of the in-progress list
+    wantActive map[string][]*base.TaskMessage // final state of the active list
     wantDeadlines map[string][]base.Z // final state of the deadlines set
 }{
     {
         pending: map[string][]*base.TaskMessage{
@@ -652,7 +652,7 @@ func TestRequeue(t *testing.T) {
 wantPending: map[string][]*base.TaskMessage{
     "default": {t1},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t2},
 },
 wantDeadlines: map[string][]base.Z{
@@ -677,7 +677,7 @@ func TestRequeue(t *testing.T) {
 wantPending: map[string][]*base.TaskMessage{
     "default": {t1, t2},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {},
 },
 wantDeadlines: map[string][]base.Z{
@@ -702,7 +702,7 @@ func TestRequeue(t *testing.T) {
     "default": {t1},
     "critical": {t3},
 },
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t2},
     "critical": {},
 },
@@ -716,7 +716,7 @@ func TestRequeue(t *testing.T) {
 for _, tc := range tests {
     h.FlushDB(t, r.client) // clean up db before each test case
     h.SeedAllPendingQueues(t, r.client, tc.pending)
-    h.SeedAllInProgressQueues(t, r.client, tc.inProgress)
+    h.SeedAllActiveQueues(t, r.client, tc.inProgress)
     h.SeedAllDeadlines(t, r.client, tc.deadlines)

     err := r.Requeue(tc.target)
@@ -731,10 +731,10 @@ func TestRequeue(t *testing.T) {
         t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff)
     }
 }
-for qname, want := range tc.wantInProgress {
-    gotInProgress := h.GetInProgressMessages(t, r.client, qname)
-    if diff := cmp.Diff(want, gotInProgress, h.SortMsgOpt); diff != "" {
-        t.Errorf("mismatch found in %q: (-want, +got):\n%s", base.InProgressKey(qname), diff)
+for qname, want := range tc.wantActive {
+    gotActive := h.GetActiveMessages(t, r.client, qname)
+    if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+        t.Errorf("mismatch found in %q: (-want, +got):\n%s", base.ActiveKey(qname), diff)
     }
 }
 for qname, want := range tc.wantDeadlines {
@@ -877,15 +877,15 @@ func TestRetry(t *testing.T) {
 errMsg := "SMTP server is not responding"

 tests := []struct {
     inProgress map[string][]*base.TaskMessage
     deadlines map[string][]base.Z
     retry map[string][]base.Z
     msg *base.TaskMessage
     processAt time.Time
     errMsg string
-    wantInProgress map[string][]*base.TaskMessage
+    wantActive map[string][]*base.TaskMessage
     wantDeadlines map[string][]base.Z
     wantRetry map[string][]base.Z
 }{
     {
         inProgress: map[string][]*base.TaskMessage{
@@ -900,7 +900,7 @@ func TestRetry(t *testing.T) {
 msg: t1,
 processAt: now.Add(5 * time.Minute),
 errMsg: errMsg,
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t2},
 },
 wantDeadlines: map[string][]base.Z{
@@ -929,7 +929,7 @@ func TestRetry(t *testing.T) {
 msg: t4,
 processAt: now.Add(5 * time.Minute),
 errMsg: errMsg,
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t1, t2},
     "custom": {},
 },
@@ -948,7 +948,7 @@ func TestRetry(t *testing.T) {
 for _, tc := range tests {
     h.FlushDB(t, r.client)
-    h.SeedAllInProgressQueues(t, r.client, tc.inProgress)
+    h.SeedAllActiveQueues(t, r.client, tc.inProgress)
     h.SeedAllDeadlines(t, r.client, tc.deadlines)
     h.SeedAllRetryQueues(t, r.client, tc.retry)
@@ -958,10 +958,10 @@ func TestRetry(t *testing.T) {
     continue
 }
-for queue, want := range tc.wantInProgress {
-    gotInProgress := h.GetInProgressMessages(t, r.client, queue)
-    if diff := cmp.Diff(want, gotInProgress, h.SortMsgOpt); diff != "" {
-        t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.InProgressKey(queue), diff)
+for queue, want := range tc.wantActive {
+    gotActive := h.GetActiveMessages(t, r.client, queue)
+    if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+        t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.ActiveKey(queue), diff)
     }
 }
 for queue, want := range tc.wantDeadlines {
@@ -1046,13 +1046,13 @@ func TestKill(t *testing.T) {
 // TODO(hibiken): add test cases for trimming
 tests := []struct {
     inProgress map[string][]*base.TaskMessage
     deadlines map[string][]base.Z
     dead map[string][]base.Z
     target *base.TaskMessage // task to kill
-    wantInProgress map[string][]*base.TaskMessage
+    wantActive map[string][]*base.TaskMessage
     wantDeadlines map[string][]base.Z
     wantDead map[string][]base.Z
 }{
     {
         inProgress: map[string][]*base.TaskMessage{
@@ -1070,7 +1070,7 @@ func TestKill(t *testing.T) {
     },
 },
 target: t1,
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t2},
 },
 wantDeadlines: map[string][]base.Z{
@@ -1098,7 +1098,7 @@ func TestKill(t *testing.T) {
     "default": {},
 },
 target: t1,
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t2, t3},
 },
 wantDeadlines: map[string][]base.Z{
@@ -1131,7 +1131,7 @@ func TestKill(t *testing.T) {
     "custom": {},
 },
 target: t4,
-wantInProgress: map[string][]*base.TaskMessage{
+wantActive: map[string][]*base.TaskMessage{
     "default": {t1},
     "custom": {},
 },
@@ -1150,7 +1150,7 @@ func TestKill(t *testing.T) {
 for _, tc := range tests {
     h.FlushDB(t, r.client) // clean up db before each test case
-    h.SeedAllInProgressQueues(t, r.client, tc.inProgress)
+    h.SeedAllActiveQueues(t, r.client, tc.inProgress)
     h.SeedAllDeadlines(t, r.client, tc.deadlines)
     h.SeedAllDeadQueues(t, r.client, tc.dead)
@@ -1160,10 +1160,10 @@ func TestKill(t *testing.T) {
     continue
 }
-for queue, want := range tc.wantInProgress {
-    gotInProgress := h.GetInProgressMessages(t, r.client, queue)
-    if diff := cmp.Diff(want, gotInProgress, h.SortMsgOpt); diff != "" {
-        t.Errorf("mismatch found in %q: (-want, +got)\n%s", base.InProgressKey(queue), diff)
+for queue, want := range tc.wantActive {
+    gotActive := h.GetActiveMessages(t, r.client, queue)
+    if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+        t.Errorf("mismatch found in %q: (-want, +got)\n%s", base.ActiveKey(queue), diff)
     }
 }
 for queue, want := range tc.wantDeadlines {
@@ -1363,7 +1363,7 @@ func TestListDeadlineExceeded(t *testing.T) {
     want []*base.TaskMessage
 }{
     {
-        desc: "with one task in-progress",
+        desc: "with a single active task",
         deadlines: map[string][]base.Z{
             "default": {{Message: t1, Score: fiveMinutesAgo.Unix()}},
         },
@@ -1372,7 +1372,7 @@ func TestListDeadlineExceeded(t *testing.T) {
     want: []*base.TaskMessage{t1},
 },
 {
-    desc: "with multiple tasks in-progress, and one expired",
+    desc: "with multiple active tasks, and one expired",
     deadlines: map[string][]base.Z{
         "default": {
             {Message: t1, Score: oneHourAgo.Unix()},
@@ -1387,7 +1387,7 @@ func TestListDeadlineExceeded(t *testing.T) {
     want: []*base.TaskMessage{t1},
 },
 {
-    desc: "with multiple expired tasks in-progress",
+    desc: "with multiple expired active tasks",
     deadlines: map[string][]base.Z{
         "default": {
             {Message: t1, Score: oneHourAgo.Unix()},
@@ -1402,7 +1402,7 @@ func TestListDeadlineExceeded(t *testing.T) {
     want: []*base.TaskMessage{t1, t3},
 },
 {
-    desc: "with empty in-progress queue",
+    desc: "with empty active queue",
     deadlines: map[string][]base.Z{
         "default": {},
         "critical": {},


@@ -56,7 +56,7 @@ type processor struct {
 // abort channel communicates to the in-flight worker goroutines to stop.
 abort chan struct{}

-// cancelations is a set of cancel functions for all in-progress tasks.
+// cancelations is a set of cancel functions for all active tasks.
 cancelations *base.Cancelations

 starting chan<- *base.TaskMessage
@@ -216,9 +216,9 @@ func (p *processor) exec() {
     return
 case resErr := <-resCh:
     // Note: One of three things should happen.
-    // 1) Done -> Removes the message from InProgress
-    // 2) Retry -> Removes the message from InProgress & Adds the message to Retry
-    // 3) Kill -> Removes the message from InProgress & Adds the message to Dead
+    // 1) Done -> Removes the message from Active
+    // 2) Retry -> Removes the message from Active & Adds the message to Retry
+    // 3) Kill -> Removes the message from Active & Adds the message to Dead
     if resErr != nil {
         p.retryOrKill(ctx, msg, resErr)
         return
@@ -241,7 +241,7 @@ func (p *processor) requeue(msg *base.TaskMessage) {
 func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
     err := p.broker.Done(msg)
     if err != nil {
-        errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.InProgressKey(msg.Queue), err)
+        errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
         deadline, ok := ctx.Deadline()
         if !ok {
             panic("asynq: internal error: missing deadline in context")
@@ -274,7 +274,7 @@ func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
     retryAt := time.Now().Add(d)
     err := p.broker.Retry(msg, retryAt, e.Error())
     if err != nil {
-        errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.InProgressKey(msg.Queue), base.RetryKey(msg.Queue))
+        errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
         deadline, ok := ctx.Deadline()
         if !ok {
             panic("asynq: internal error: missing deadline in context")
@@ -293,7 +293,7 @@ func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
 func (p *processor) kill(ctx context.Context, msg *base.TaskMessage, e error) {
     err := p.broker.Kill(msg, e.Error())
     if err != nil {
-        errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.InProgressKey(msg.Queue), base.DeadKey(msg.Queue))
+        errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.DeadKey(msg.Queue))
         deadline, ok := ctx.Deadline()
         if !ok {
             panic("asynq: internal error: missing deadline in context")


@@ -118,8 +118,8 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
 		}
 	}
 	time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
-	if l := r.LLen(base.InProgressKey(base.DefaultQueueName)).Val(); l != 0 {
-		t.Errorf("%q has %d tasks, want 0", base.InProgressKey(base.DefaultQueueName), l)
+	if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
+		t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
 	}
 	p.terminate()
@@ -207,10 +207,10 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
 	p.start(&sync.WaitGroup{})
 	// Wait for two seconds to allow all pending tasks to be processed.
 	time.Sleep(2 * time.Second)
-	// Make sure no messages are stuck in in-progress list.
+	// Make sure no messages are stuck in active list.
 	for _, qname := range tc.queues {
-		if l := r.LLen(base.InProgressKey(qname)).Val(); l != 0 {
-			t.Errorf("%q has %d tasks, want 0", base.InProgressKey(qname), l)
+		if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
+			t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
 		}
 	}
 	p.terminate()
@@ -283,8 +283,8 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
 	p.start(&sync.WaitGroup{})
 	time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
-	if l := r.LLen(base.InProgressKey(base.DefaultQueueName)).Val(); l != 0 {
-		t.Errorf("%q has %d tasks, want 0", base.InProgressKey(base.DefaultQueueName), l)
+	if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
+		t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
 	}
 	p.terminate()
@@ -397,8 +397,8 @@ func TestProcessorRetry(t *testing.T) {
 		t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.DeadKey(base.DefaultQueueName), diff)
 	}
-	if l := r.LLen(base.InProgressKey(base.DefaultQueueName)).Val(); l != 0 {
-		t.Errorf("%q has %d tasks, want 0", base.InProgressKey(base.DefaultQueueName), l)
+	if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
+		t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
 	}
 	if n != tc.wantErrCount {
@@ -548,10 +548,10 @@ func TestProcessorWithStrictPriority(t *testing.T) {
 	p.start(&sync.WaitGroup{})
 	time.Sleep(tc.wait)
-	// Make sure no tasks are stuck in in-progress list.
+	// Make sure no tasks are stuck in active list.
 	for _, qname := range tc.queues {
-		if l := r.LLen(base.InProgressKey(qname)).Val(); l != 0 {
-			t.Errorf("%q has %d tasks, want 0", base.InProgressKey(qname), l)
+		if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
+			t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
 		}
 	}
 	p.terminate()
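
Every assertion in these tests follows one pattern: once the processor has drained the queue, the per-queue active list in Redis must be empty. A condensed version of that check as a standalone helper, written against go-redis v8 (the tests above use an older, context-free client, and the key format here is an assumption):

// Sketch: assert that a queue's active list is empty after processing.
package asynqtest

import (
    "context"
    "fmt"
    "testing"

    "github.com/go-redis/redis/v8"
)

func assertActiveEmpty(t *testing.T, r *redis.Client, qname string) {
    t.Helper()
    key := fmt.Sprintf("asynq:{%s}:active", qname) // assumed ActiveKey format
    l, err := r.LLen(context.Background(), key).Result()
    if err != nil {
        t.Fatalf("LLEN %q failed: %v", key, err)
    }
    if l != 0 {
        t.Errorf("%q has %d tasks, want 0", key, l)
    }
}
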


@@ -32,18 +32,18 @@ func TestRecoverer(t *testing.T) {
 	oneHourAgo := now.Add(-1 * time.Hour)
 	tests := []struct {
 		desc       string
 		inProgress map[string][]*base.TaskMessage
 		deadlines  map[string][]base.Z
 		retry      map[string][]base.Z
 		dead       map[string][]base.Z
-		wantInProgress map[string][]*base.TaskMessage
+		wantActive     map[string][]*base.TaskMessage
 		wantDeadlines  map[string][]base.Z
 		wantRetry      map[string][]*base.TaskMessage
 		wantDead       map[string][]*base.TaskMessage
 	}{
 		{
-			desc: "with one task in-progress",
+			desc: "with one active task",
 			inProgress: map[string][]*base.TaskMessage{
 				"default": {t1},
 			},
@@ -56,7 +56,7 @@ func TestRecoverer(t *testing.T) {
 			dead: map[string][]base.Z{
 				"default": {},
 			},
-			wantInProgress: map[string][]*base.TaskMessage{
+			wantActive: map[string][]*base.TaskMessage{
 				"default": {},
 			},
 			wantDeadlines: map[string][]base.Z{
@@ -87,7 +87,7 @@ func TestRecoverer(t *testing.T) {
 				"default":  {},
 				"critical": {},
 			},
-			wantInProgress: map[string][]*base.TaskMessage{
+			wantActive: map[string][]*base.TaskMessage{
 				"default":  {},
 				"critical": {},
 			},
@@ -105,7 +105,7 @@ func TestRecoverer(t *testing.T) {
 			},
 		},
 		{
-			desc: "with multiple tasks in-progress, and one expired",
+			desc: "with multiple active tasks, and one expired",
 			inProgress: map[string][]*base.TaskMessage{
 				"default":  {t1, t2},
 				"critical": {t3},
@@ -127,7 +127,7 @@ func TestRecoverer(t *testing.T) {
 				"default":  {},
 				"critical": {},
 			},
-			wantInProgress: map[string][]*base.TaskMessage{
+			wantActive: map[string][]*base.TaskMessage{
 				"default":  {t2},
 				"critical": {t3},
 			},
@@ -145,7 +145,7 @@ func TestRecoverer(t *testing.T) {
 			},
 		},
 		{
-			desc: "with multiple expired tasks in-progress",
+			desc: "with multiple expired active tasks",
 			inProgress: map[string][]*base.TaskMessage{
 				"default":  {t1, t2},
 				"critical": {t3},
@@ -167,7 +167,7 @@ func TestRecoverer(t *testing.T) {
 				"default":  {},
 				"critical": {},
 			},
-			wantInProgress: map[string][]*base.TaskMessage{
+			wantActive: map[string][]*base.TaskMessage{
 				"default":  {t2},
 				"critical": {},
 			},
@@ -184,7 +184,7 @@ func TestRecoverer(t *testing.T) {
 			},
 		},
 		{
-			desc: "with empty in-progress queue",
+			desc: "with empty active queue",
 			inProgress: map[string][]*base.TaskMessage{
 				"default":  {},
 				"critical": {},
@@ -201,7 +201,7 @@ func TestRecoverer(t *testing.T) {
 				"default":  {},
 				"critical": {},
 			},
-			wantInProgress: map[string][]*base.TaskMessage{
+			wantActive: map[string][]*base.TaskMessage{
 				"default":  {},
 				"critical": {},
 			},
@@ -222,7 +222,7 @@ func TestRecoverer(t *testing.T) {
 	for _, tc := range tests {
 		h.FlushDB(t, r)
-		h.SeedAllInProgressQueues(t, r, tc.inProgress)
+		h.SeedAllActiveQueues(t, r, tc.inProgress)
 		h.SeedAllDeadlines(t, r, tc.deadlines)
 		h.SeedAllRetryQueues(t, r, tc.retry)
 		h.SeedAllDeadQueues(t, r, tc.dead)
@@ -240,10 +240,10 @@ func TestRecoverer(t *testing.T) {
 		time.Sleep(2 * time.Second)
 		recoverer.terminate()
-		for qname, want := range tc.wantInProgress {
-			gotInProgress := h.GetInProgressMessages(t, r, qname)
-			if diff := cmp.Diff(want, gotInProgress, h.SortMsgOpt); diff != "" {
-				t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.InProgressKey(qname), diff)
+		for qname, want := range tc.wantActive {
+			gotActive := h.GetActiveMessages(t, r, qname)
+			if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+				t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff)
 			}
 		}
 		for qname, want := range tc.wantDeadlines {
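
As the expected states in this table show, the recoverer's job is to find active tasks whose deadlines have expired and move them back to the retry set. A simplified sketch of that pass, using a hypothetical store interface rather than the library's internal rdb API:

// Sketch of a recovery pass: any task whose deadline has passed is removed
// from the queue's active list and scheduled for retry. The store interface
// is hypothetical; the real implementation lives in the rdb package.
package recovery

import "time"

type task struct {
    ID       string
    Queue    string
    Deadline time.Time
}

type store interface {
    ListDeadlineExceeded(cutoff time.Time, qnames ...string) ([]*task, error)
    RetryNow(t *task, errMsg string) error // active -> retry, due immediately
}

func recoverExpired(s store, qnames ...string) error {
    expired, err := s.ListDeadlineExceeded(time.Now(), qnames...)
    if err != nil {
        return err
    }
    for _, t := range expired {
        if err := s.RetryNow(t, "deadline exceeded"); err != nil {
            return err
        }
    }
    return nil
}
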


@@ -20,7 +20,7 @@ type subscriber struct {
 	// channel to communicate back to the long running "subscriber" goroutine.
 	done chan struct{}
-	// cancelations hold cancel functions for all in-progress tasks.
+	// cancelations hold cancel functions for all active tasks.
 	cancelations *base.Cancelations
 	// time to wait before retrying to connect to redis.
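
base.Cancelations is the registry the subscriber consults when a cancelation message arrives for an active task. Its definition is outside this diff; a minimal sketch of such a registry, assuming it maps task IDs to context.CancelFunc values:

// Sketch of a cancelation registry keyed by task ID; a mutex guards the map
// because worker goroutines add and delete entries while the subscriber reads.
package base

import (
    "context"
    "sync"
)

type Cancelations struct {
    mu          sync.Mutex
    cancelFuncs map[string]context.CancelFunc
}

func NewCancelations() *Cancelations {
    return &Cancelations{cancelFuncs: make(map[string]context.CancelFunc)}
}

// Add registers the cancel function for an active task.
func (c *Cancelations) Add(id string, fn context.CancelFunc) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.cancelFuncs[id] = fn
}

// Delete removes the entry once the task is no longer active.
func (c *Cancelations) Delete(id string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    delete(c.cancelFuncs, id)
}

// Get returns the cancel function for the given task ID, if registered.
func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
    c.mu.Lock()
    defer c.mu.Unlock()
    fn, ok = c.cancelFuncs[id]
    return fn, ok
}
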


@@ -23,7 +23,7 @@ func TestSyncer(t *testing.T) {
 	}
 	r := setup(t)
 	rdbClient := rdb.NewRDB(r)
-	h.SeedInProgressQueue(t, r, inProgress, base.DefaultQueueName)
+	h.SeedActiveQueue(t, r, inProgress, base.DefaultQueueName)
 	const interval = time.Second
 	syncRequestCh := make(chan *syncRequest)
@@ -48,9 +48,9 @@ func TestSyncer(t *testing.T) {
 	time.Sleep(2 * interval) // ensure that syncer runs at least once
-	gotInProgress := h.GetInProgressMessages(t, r, base.DefaultQueueName)
-	if l := len(gotInProgress); l != 0 {
-		t.Errorf("%q has length %d; want 0", base.InProgressKey(base.DefaultQueueName), l)
+	gotActive := h.GetActiveMessages(t, r, base.DefaultQueueName)
+	if l := len(gotActive); l != 0 {
+		t.Errorf("%q has length %d; want 0", base.ActiveKey(base.DefaultQueueName), l)
 	}
 }
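
The syncer consumes syncRequest values: deferred Redis operations (for example, a Done call that failed while Redis was unreachable) to be retried on an interval until they succeed. A minimal sketch of that loop, with syncRequest reduced to a single retryable closure (the real struct likely carries more fields):

// Sketch of a syncer loop: retry queued operations every interval, keeping
// the ones that still fail for the next tick.
package syncer

import "time"

type syncRequest struct {
    fn func() error // operation to retry until it succeeds
}

func run(requestCh <-chan *syncRequest, done <-chan struct{}, interval time.Duration) {
    var pending []*syncRequest
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        select {
        case <-done:
            return
        case req := <-requestCh:
            pending = append(pending, req)
        case <-ticker.C:
            var failed []*syncRequest
            for _, req := range pending {
                if err := req.fn(); err != nil {
                    failed = append(failed, req) // try again on the next tick
                }
            }
            pending = failed
        }
    }
}
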


@@ -145,9 +145,9 @@ func printQueueStats(s *asynq.QueueStats) {
 	fmt.Printf("Paused: %t\n\n", s.Paused)
 	fmt.Println("Task Breakdown:")
 	printTable(
-		[]string{"InProgress", "Pending", "Scheduled", "Retry", "Dead"},
+		[]string{"Active", "Pending", "Scheduled", "Retry", "Dead"},
 		func(w io.Writer, tmpl string) {
-			fmt.Fprintf(w, tmpl, s.InProgress, s.Pending, s.Scheduled, s.Retry, s.Dead)
+			fmt.Fprintf(w, tmpl, s.Active, s.Pending, s.Scheduled, s.Retry, s.Dead)
 		},
 	)
 	fmt.Println()
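
printTable is a small helper the CLI uses for column-aligned output; its definition is outside this diff. A plausible reconstruction with text/tabwriter, matching the call shape above (headers plus a callback that writes the rows), with a short usage example in main:

// Sketch of a printTable helper compatible with the call above: it prints
// tab-aligned headers and a separator row, then lets the caller print rows
// using the same format template.
package main

import (
    "fmt"
    "io"
    "os"
    "strings"
    "text/tabwriter"
)

func printTable(cols []string, printRows func(w io.Writer, tmpl string)) {
    tmpl := strings.Repeat("%v\t", len(cols)) + "\n"
    tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
    headers := make([]interface{}, len(cols))
    seps := make([]interface{}, len(cols))
    for i, c := range cols {
        headers[i] = c
        seps[i] = strings.Repeat("-", len(c))
    }
    fmt.Fprintf(tw, tmpl, headers...)
    fmt.Fprintf(tw, tmpl, seps...)
    printRows(tw, tmpl)
    tw.Flush()
}

func main() {
    printTable([]string{"Active", "Pending"}, func(w io.Writer, tmpl string) {
        fmt.Fprintf(w, tmpl, 3, 12)
    })
}
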


@@ -51,14 +51,14 @@ func init() {
 }
 type AggregateStats struct {
-	InProgress int
+	Active     int
 	Pending    int
 	Scheduled  int
 	Retry      int
 	Dead       int
 	Processed  int
 	Failed     int
 	Timestamp  time.Time
 }
 func stats(cmd *cobra.Command, args []string) {
@@ -78,7 +78,7 @@ func stats(cmd *cobra.Command, args []string) {
 		fmt.Println(err)
 		os.Exit(1)
 	}
-	aggStats.InProgress += s.InProgress
+	aggStats.Active += s.Active
 	aggStats.Pending += s.Pending
 	aggStats.Scheduled += s.Scheduled
 	aggStats.Retry += s.Retry
@@ -113,9 +113,9 @@ func stats(cmd *cobra.Command, args []string) {
 func printStatsByState(s *AggregateStats) {
 	format := strings.Repeat("%v\t", 5) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
-	fmt.Fprintf(tw, format, "InProgress", "Pending", "Scheduled", "Retry", "Dead")
+	fmt.Fprintf(tw, format, "Active", "Pending", "Scheduled", "Retry", "Dead")
 	fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----")
-	fmt.Fprintf(tw, format, s.InProgress, s.Pending, s.Scheduled, s.Retry, s.Dead)
+	fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Dead)
 	tw.Flush()
 }
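
The stats command folds each queue's CurrentStats result into AggregateStats before printing. A trimmed sketch of that aggregation against the public Inspector API (the Queues method for enumerating known queues is assumed here; error handling is reduced to an early return):

// Sketch: aggregate per-queue stats across all known queues using the
// public Inspector API. The Queues call is an assumption.
package main

import (
    "fmt"

    "github.com/hibiken/asynq"
)

func aggregate(i *asynq.Inspector) (active, pending int, err error) {
    qnames, err := i.Queues()
    if err != nil {
        return 0, 0, err
    }
    for _, qname := range qnames {
        s, err := i.CurrentStats(qname)
        if err != nil {
            return 0, 0, err
        }
        active += s.Active
        pending += s.Pending
    }
    return active, pending, nil
}

func main() {
    i := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    a, p, err := aggregate(i)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("Active: %d, Pending: %d\n", a, p)
}
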


@@ -74,7 +74,7 @@ var taskListCmd = &cobra.Command{
 	Long: `List tasks of the given state from the specified queue.
 The value for the state flag should be one of:
-- in-progress
+- active
 - pending
 - scheduled
 - retry
@@ -95,7 +95,7 @@ To list the tasks from the second page, run
 var taskCancelCmd = &cobra.Command{
 	Use:   "cancel TASK_ID [TASK_ID...]",
-	Short: "Cancel one or more in-progress tasks",
+	Short: "Cancel one or more active tasks",
 	Args:  cobra.MinimumNArgs(1),
 	Run:   taskCancel,
 }
@@ -165,8 +165,8 @@ func taskList(cmd *cobra.Command, args []string) {
 	}
 	switch state {
-	case "in-progress":
-		listInProgressTasks(qname, pageNum, pageSize)
+	case "active":
+		listActiveTasks(qname, pageNum, pageSize)
 	case "pending":
 		listPendingTasks(qname, pageNum, pageSize)
 	case "scheduled":
@@ -181,15 +181,15 @@ func taskList(cmd *cobra.Command, args []string) {
 	}
 }
-func listInProgressTasks(qname string, pageNum, pageSize int) {
+func listActiveTasks(qname string, pageNum, pageSize int) {
 	i := createInspector()
-	tasks, err := i.ListInProgressTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
+	tasks, err := i.ListActiveTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
 	if err != nil {
 		fmt.Println(err)
 		os.Exit(1)
 	}
 	if len(tasks) == 0 {
-		fmt.Printf("No in-progress tasks in %q queue\n", qname)
+		fmt.Printf("No active tasks in %q queue\n", qname)
 		return
 	}
 	printTable(
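
For library users, the CLI's task list command with the state flag set to active maps to a single Inspector call. A short usage sketch against the renamed API (the Redis address and queue name are placeholders):

// Sketch: list the first page of active tasks in the "default" queue using
// the renamed Inspector API from this commit.
package main

import (
    "fmt"

    "github.com/hibiken/asynq"
)

func main() {
    i := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    tasks, err := i.ListActiveTasks("default", asynq.PageSize(30), asynq.Page(1))
    if err != nil {
        fmt.Println(err)
        return
    }
    for _, t := range tasks {
        fmt.Printf("id=%s type=%s queue=%s\n", t.ID, t.Type, t.Queue)
    }
}
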