Mirror of https://github.com/hibiken/asynq.git (synced 2024-11-10 11:31:58 +08:00)
Minor improvements
This commit is contained in: commit 4ceb49cfd1 (parent 2e91c49c3f)
@@ -16,7 +16,7 @@ type Task struct {
    // Type indicates the kind of the task to be performed.
    Type string

-    // Payload holds data needed for the task execution.
+    // Payload holds data needed to process the task.
    Payload Payload
}
@@ -32,23 +32,26 @@ func FailureKey(t time.Time) string {
}

// TaskMessage is the internal representation of a task with additional metadata fields.
-// Serialized data of this type gets written in redis.
+// Serialized data of this type gets written to redis.
type TaskMessage struct {
    //-------- Task fields --------
-    // Type represents the kind of task.
+    // Type indicates the kind of the task to be performed.
    Type string

    // Payload holds data needed to process the task.
    Payload map[string]interface{}

    //-------- Metadata fields --------
-    // ID is a unique identifier for each task
+    // ID is a unique identifier for each task.
    ID xid.ID
-    // Queue is a name this message should be enqueued to
+
+    // Queue is a name this message should be enqueued to.
    Queue string

    // Retry is the max number of retry for this task.
    Retry int
-    // Retried is the number of times we've retried this task so far
+
+    // Retried is the number of times we've retried this task so far.
    Retried int
-    // ErrorMsg holds the error message from the last failure
+
+    // ErrorMsg holds the error message from the last failure.
    ErrorMsg string
}
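For context on the "written to redis" comment above: a TaskMessage is JSON-encoded, and that JSON string is what the RDB methods below store in redis lists and sorted sets. A minimal, self-contained sketch of that serialization (not part of this commit; the struct here only mirrors the fields shown above, and the field values are illustrative):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/rs/xid"
)

// TaskMessage mirrors the fields shown in the diff above.
type TaskMessage struct {
    Type     string
    Payload  map[string]interface{}
    ID       xid.ID
    Queue    string
    Retry    int
    Retried  int
    ErrorMsg string
}

func main() {
    msg := TaskMessage{
        Type:    "send_email",
        Payload: map[string]interface{}{"subject": "hello"},
        ID:      xid.New(),
        Queue:   "default",
        Retry:   25, // illustrative value, not a default taken from this diff
    }
    // The resulting JSON string is the value pushed into redis.
    b, err := json.Marshal(&msg)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
}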
@@ -36,38 +36,32 @@ func (r *RDB) Close() error {
    return r.client.Close()
}

-// Enqueue inserts the given task to the end of the queue.
-// It also adds the queue name to the "all-queues" list.
+// Enqueue inserts the given task to the tail of the queue.
func (r *RDB) Enqueue(msg *base.TaskMessage) error {
    bytes, err := json.Marshal(msg)
    if err != nil {
-        return fmt.Errorf("could not marshal %+v to json: %v", msg, err)
+        return err
    }
    qname := base.QueuePrefix + msg.Queue
-    pipe := r.client.Pipeline()
-    pipe.LPush(qname, string(bytes))
-    _, err = pipe.Exec()
-    if err != nil {
-        return fmt.Errorf("could not enqueue the task %+v to %q: %v", msg, qname, err)
-    }
-    return nil
+    return r.client.LPush(qname, string(bytes)).Err()
}

// Dequeue blocks until there is a task available to be processed,
-// once a task is available, it adds the task to "in progress" list
-// and returns the task.
+// once a task is available, it adds the task to "in progress" queue
+// and returns the task. If there are no tasks for the entire timeout
+// duration, it returns ErrDequeueTimeout.
func (r *RDB) Dequeue(timeout time.Duration) (*base.TaskMessage, error) {
    data, err := r.client.BRPopLPush(base.DefaultQueue, base.InProgressQueue, timeout).Result()
    if err == redis.Nil {
        return nil, ErrDequeueTimeout
    }
    if err != nil {
-        return nil, fmt.Errorf("command `BRPOPLPUSH %q %q %v` failed: %v", base.DefaultQueue, base.InProgressQueue, timeout, err)
+        return nil, err
    }
    var msg base.TaskMessage
    err = json.Unmarshal([]byte(data), &msg)
    if err != nil {
-        return nil, fmt.Errorf("could not unmarshal %v to json: %v", data, err)
+        return nil, err
    }
    return &msg, nil
}
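A minimal sketch (not part of the commit) of the queue flow the simplified Enqueue and Dequeue rely on: a single LPUSH onto the default queue, and a blocking BRPOPLPUSH that atomically moves the oldest item into the in-progress list. It assumes go-redis v7 and a redis server on localhost; the literal key names asynq:default and asynq:in_progress stand in for base.DefaultQueue and base.InProgressQueue and are assumptions here, not values confirmed by this diff.

package main

import (
    "fmt"
    "time"

    "github.com/go-redis/redis/v7"
)

func main() {
    client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

    // Enqueue side: one LPUSH of the JSON-encoded message.
    if err := client.LPush("asynq:default", `{"Type":"send_email"}`).Err(); err != nil {
        fmt.Println("enqueue failed:", err)
        return
    }

    // Dequeue side: BRPOPLPUSH blocks up to the timeout and atomically
    // moves the oldest item into the in-progress list.
    data, err := client.BRPopLPush("asynq:default", "asynq:in_progress", 5*time.Second).Result()
    if err == redis.Nil {
        fmt.Println("no task within timeout")
        return
    }
    if err != nil {
        fmt.Println("dequeue failed:", err)
        return
    }
    fmt.Println("got task:", data)
}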
@@ -76,7 +70,7 @@ func (r *RDB) Dequeue(timeout time.Duration) (*base.TaskMessage, error) {
func (r *RDB) Done(msg *base.TaskMessage) error {
    bytes, err := json.Marshal(msg)
    if err != nil {
-        return fmt.Errorf("could not marshal %+v to json: %v", msg, err)
+        return err
    }
    // Note: LREM count ZERO means "remove all elements equal to val"
    // KEYS[1] -> asynq:in_progress
@@ -94,10 +88,9 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
    now := time.Now()
    processedKey := base.ProcessedKey(now)
    expireAt := now.Add(statsTTL)
-    _, err = script.Run(r.client,
+    return script.Run(r.client,
        []string{base.InProgressQueue, processedKey},
-        string(bytes), expireAt.Unix()).Result()
-    return err
+        string(bytes), expireAt.Unix()).Err()
}

// Requeue moves the task from in-progress queue to the default
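The change above (and the matching ones in Requeue and Schedule below) applies a go-redis idiom: when only the error matters, finish the command with .Err() instead of discarding the value from .Result(). A self-contained illustration of the same idiom with a hypothetical counter key, assuming go-redis v7; this is not code from the commit.

package main

import (
    "fmt"
    "time"

    "github.com/go-redis/redis/v7"
)

// markProcessed is a hypothetical helper: it increments a counter and sets
// its expiration in one Lua script, returning the script's error directly
// via Err() rather than `_, err := ...Result(); return err`.
func markProcessed(c *redis.Client, key string, expireAt time.Time) error {
    script := redis.NewScript(`
redis.call("INCR", KEYS[1])
redis.call("EXPIREAT", KEYS[1], ARGV[1])
return redis.status_reply("OK")
`)
    return script.Run(c, []string{key}, expireAt.Unix()).Err()
}

func main() {
    c := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    if err := markProcessed(c, "example:processed:2019-12-27", time.Now().Add(24*time.Hour)); err != nil {
        fmt.Println("markProcessed failed:", err)
    }
}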
@@ -105,7 +98,7 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
func (r *RDB) Requeue(msg *base.TaskMessage) error {
    bytes, err := json.Marshal(msg)
    if err != nil {
-        return fmt.Errorf("could not marshal %+v to json: %v", msg, err)
+        return err
    }
    // Note: Use RPUSH to push to the head of the queue.
    // KEYS[1] -> asynq:in_progress
@@ -116,22 +109,20 @@ func (r *RDB) Requeue(msg *base.TaskMessage) error {
    redis.call("RPUSH", KEYS[2], ARGV[1])
    return redis.status_reply("OK")
    `)
-    _, err = script.Run(r.client, []string{base.InProgressQueue, base.DefaultQueue}, string(bytes)).Result()
-    return err
+    return script.Run(r.client,
+        []string{base.InProgressQueue, base.DefaultQueue},
+        string(bytes)).Err()
}

// Schedule adds the task to the backlog queue to be processed in the future.
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
    bytes, err := json.Marshal(msg)
    if err != nil {
-        return fmt.Errorf("could not marshal %+v to json: %v", msg, err)
+        return err
    }
    score := float64(processAt.Unix())
-    err = r.client.ZAdd(base.ScheduledQueue, &redis.Z{Member: string(bytes), Score: score}).Err()
-    if err != nil {
-        return fmt.Errorf("command `ZADD %s %.1f %s` failed: %v", base.ScheduledQueue, score, string(bytes), err)
-    }
-    return nil
+    return r.client.ZAdd(base.ScheduledQueue,
+        &redis.Z{Member: string(bytes), Score: score}).Err()
}

// Retry moves the task from in-progress to retry queue, incrementing retry count
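For context on why Schedule stores processAt.Unix() as the sorted-set score: a poller can later ask for every member whose score is at or below the current time and move those back onto the default queue. A rough sketch of that read side, assuming go-redis v7; the key name asynq:scheduled and the polling approach are assumptions, not something this diff shows.

package main

import (
    "fmt"
    "strconv"
    "time"

    "github.com/go-redis/redis/v7"
)

func main() {
    c := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

    // Members scored at or below "now" are due for processing.
    now := strconv.FormatInt(time.Now().Unix(), 10)
    due, err := c.ZRangeByScore("asynq:scheduled", &redis.ZRangeBy{
        Min: "-inf",
        Max: now,
    }).Result()
    if err != nil {
        fmt.Println("poll failed:", err)
        return
    }
    for _, msg := range due {
        // A real poller would push these back onto the default queue
        // and remove them from the scheduled set.
        fmt.Println("due task:", msg)
    }
}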
@@ -45,10 +45,20 @@ func TestDequeue(t *testing.T) {
        enqueued []*base.TaskMessage
        want *base.TaskMessage
        err error
-        inProgress int64 // length of "in-progress" tasks after dequeue
+        wantInProgress []*base.TaskMessage
    }{
-        {enqueued: []*base.TaskMessage{t1}, want: t1, err: nil, inProgress: 1},
-        {enqueued: []*base.TaskMessage{}, want: nil, err: ErrDequeueTimeout, inProgress: 0},
+        {
+            enqueued: []*base.TaskMessage{t1},
+            want: t1,
+            err: nil,
+            wantInProgress: []*base.TaskMessage{t1},
+        },
+        {
+            enqueued: []*base.TaskMessage{},
+            want: nil,
+            err: ErrDequeueTimeout,
+            wantInProgress: []*base.TaskMessage{},
+        },
    }

    for _, tc := range tests {
@@ -61,8 +71,11 @@ func TestDequeue(t *testing.T) {
                got, err, tc.want, tc.err)
            continue
        }
-        if l := r.client.LLen(base.InProgressQueue).Val(); l != tc.inProgress {
-            t.Errorf("%q has length %d, want %d", base.InProgressQueue, l, tc.inProgress)
+
+        gotInProgressRaw := r.client.LRange(base.InProgressQueue, 0, -1).Val()
+        gotInProgress := mustUnmarshalSlice(t, gotInProgressRaw)
+        if diff := cmp.Diff(tc.wantInProgress, gotInProgress, sortMsgOpt); diff != "" {
+            t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.InProgressQueue, diff)
        }
    }
}
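The rewritten assertions lean on mustUnmarshalSlice and sortMsgOpt, helpers defined elsewhere in the test package and not shown in this diff. Purely as an illustration, a sort option like sortMsgOpt could be built with go-cmp's cmpopts; the actual helper in rdb_test.go may be defined differently.

package rdb

import (
    "github.com/google/go-cmp/cmp/cmpopts"

    "github.com/hibiken/asynq/internal/base"
)

// sortMsgOpt orders []*base.TaskMessage by ID before diffing, so that
// cmp.Diff does not report ordering differences as mismatches.
var sortMsgOpt = cmpopts.SortSlices(func(a, b *base.TaskMessage) bool {
    return a.ID.String() < b.ID.String()
})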
@@ -82,11 +95,6 @@ func TestDone(t *testing.T) {
            target: t1,
            wantInProgress: []*base.TaskMessage{t2},
        },
-        {
-            inProgress: []*base.TaskMessage{t2},
-            target: t1,
-            wantInProgress: []*base.TaskMessage{t2},
-        },
        {
            inProgress: []*base.TaskMessage{t1},
            target: t1,
@@ -107,7 +115,7 @@ func TestDone(t *testing.T) {
        data := r.client.LRange(base.InProgressQueue, 0, -1).Val()
        gotInProgress := mustUnmarshalSlice(t, data)
        if diff := cmp.Diff(tc.wantInProgress, gotInProgress, sortMsgOpt); diff != "" {
-            t.Errorf("mismatch found in %q after calling (*RDB).Done: (-want, +got):\n%s", base.InProgressQueue, diff)
+            t.Errorf("mismatch found in %q: (-want, +got):\n%s", base.InProgressQueue, diff)
            continue
        }

@@ -177,6 +185,132 @@ func TestRequeue(t *testing.T) {
    }
}

+func TestSchedule(t *testing.T) {
+    r := setup(t)
+    t1 := newTaskMessage("send_email", map[string]interface{}{"subject": "hello"})
+    tests := []struct {
+        msg *base.TaskMessage
+        processAt time.Time
+    }{
+        {t1, time.Now().Add(15 * time.Minute)},
+    }
+
+    for _, tc := range tests {
+        flushDB(t, r) // clean up db before each test case
+
+        desc := fmt.Sprintf("(*RDB).Schedule(%v, %v)", tc.msg, tc.processAt)
+        err := r.Schedule(tc.msg, tc.processAt)
+        if err != nil {
+            t.Errorf("%s = %v, want nil", desc, err)
+            continue
+        }
+
+        res := r.client.ZRangeWithScores(base.ScheduledQueue, 0, -1).Val()
+        if len(res) != 1 {
+            t.Errorf("%s inserted %d items to %q, want 1 items inserted", desc, len(res), base.ScheduledQueue)
+            continue
+        }
+        if res[0].Score != float64(tc.processAt.Unix()) {
+            t.Errorf("%s inserted an item with score %f, want %f", desc, res[0].Score, float64(tc.processAt.Unix()))
+            continue
+        }
+    }
+}
+
+func TestRetry(t *testing.T) {
+    r := setup(t)
+    t1 := newTaskMessage("send_email", map[string]interface{}{"subject": "Hola!"})
+    t2 := newTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/image.jpg"})
+    t3 := newTaskMessage("reindex", nil)
+    t1.Retried = 10
+    errMsg := "SMTP server is not responding"
+    t1AfterRetry := &base.TaskMessage{
+        ID: t1.ID,
+        Type: t1.Type,
+        Payload: t1.Payload,
+        Queue: t1.Queue,
+        Retry: t1.Retry,
+        Retried: t1.Retried + 1,
+        ErrorMsg: errMsg,
+    }
+    now := time.Now()
+
+    tests := []struct {
+        inProgress []*base.TaskMessage
+        retry []sortedSetEntry
+        msg *base.TaskMessage
+        processAt time.Time
+        errMsg string
+        wantInProgress []*base.TaskMessage
+        wantRetry []sortedSetEntry
+    }{
+        {
+            inProgress: []*base.TaskMessage{t1, t2},
+            retry: []sortedSetEntry{
+                {t3, now.Add(time.Minute).Unix()},
+            },
+            msg: t1,
+            processAt: now.Add(5 * time.Minute),
+            errMsg: errMsg,
+            wantInProgress: []*base.TaskMessage{t2},
+            wantRetry: []sortedSetEntry{
+                {t1AfterRetry, now.Add(5 * time.Minute).Unix()},
+                {t3, now.Add(time.Minute).Unix()},
+            },
+        },
+    }
+
+    for _, tc := range tests {
+        flushDB(t, r)
+        seedInProgressQueue(t, r, tc.inProgress)
+        seedRetryQueue(t, r, tc.retry)
+
+        err := r.Retry(tc.msg, tc.processAt, tc.errMsg)
+        if err != nil {
+            t.Errorf("(*RDB).Retry = %v, want nil", err)
+            continue
+        }
+
+        gotInProgressRaw := r.client.LRange(base.InProgressQueue, 0, -1).Val()
+        gotInProgress := mustUnmarshalSlice(t, gotInProgressRaw)
+        if diff := cmp.Diff(tc.wantInProgress, gotInProgress, sortMsgOpt); diff != "" {
+            t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.InProgressQueue, diff)
+        }
+
+        gotRetryRaw := r.client.ZRangeWithScores(base.RetryQueue, 0, -1).Val()
+        var gotRetry []sortedSetEntry
+        for _, z := range gotRetryRaw {
+            gotRetry = append(gotRetry, sortedSetEntry{
+                Msg: mustUnmarshal(t, z.Member.(string)),
+                Score: int64(z.Score),
+            })
+        }
+        cmpOpt := cmp.AllowUnexported(sortedSetEntry{})
+        if diff := cmp.Diff(tc.wantRetry, gotRetry, cmpOpt, sortZSetEntryOpt); diff != "" {
+            t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.RetryQueue, diff)
+        }
+
+        processedKey := base.ProcessedKey(time.Now())
+        gotProcessed := r.client.Get(processedKey).Val()
+        if gotProcessed != "1" {
+            t.Errorf("GET %q = %q, want 1", processedKey, gotProcessed)
+        }
+        gotTTL := r.client.TTL(processedKey).Val()
+        if gotTTL > statsTTL {
+            t.Errorf("TTL %q = %v, want less than or equal to %v", processedKey, gotTTL, statsTTL)
+        }
+
+        failureKey := base.FailureKey(time.Now())
+        gotFailure := r.client.Get(failureKey).Val()
+        if gotFailure != "1" {
+            t.Errorf("GET %q = %q, want 1", failureKey, gotFailure)
+        }
+        gotTTL = r.client.TTL(processedKey).Val()
+        if gotTTL > statsTTL {
+            t.Errorf("TTL %q = %v, want less than or equal to %v", failureKey, gotTTL, statsTTL)
+        }
+    }
+}
+
func TestKill(t *testing.T) {
    r := setup(t)
    t1 := newTaskMessage("send_email", nil)
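The added TestRetry also depends on helpers outside this diff: sortedSetEntry, seedInProgressQueue, seedRetryQueue, and sortZSetEntryOpt. The sketch below shows one plausible shape for two of them, inferred only from how they are used above; the field names, signatures, and the ZAdd call are assumptions rather than the repository's actual definitions.

package rdb

import (
    "encoding/json"
    "testing"

    "github.com/go-redis/redis/v7"

    "github.com/hibiken/asynq/internal/base"
)

// sortedSetEntry pairs a task message with its sorted-set score, matching
// the {msg, unixTime} literals used in the test table above.
type sortedSetEntry struct {
    Msg   *base.TaskMessage
    Score int64
}

// seedRetryQueue is a hypothetical helper: it ZADDs each entry into the
// retry queue so a test can start from a known state.
func seedRetryQueue(t *testing.T, r *RDB, entries []sortedSetEntry) {
    t.Helper()
    for _, e := range entries {
        b, err := json.Marshal(e.Msg)
        if err != nil {
            t.Fatal(err)
        }
        err = r.client.ZAdd(base.RetryQueue,
            &redis.Z{Member: string(b), Score: float64(e.Score)}).Err()
        if err != nil {
            t.Fatal(err)
        }
    }
}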
@@ -412,132 +546,3 @@ func TestCheckAndEnqueue(t *testing.T) {
        }
    }
}
-
-func TestSchedule(t *testing.T) {
-    r := setup(t)
-    tests := []struct {
-        msg *base.TaskMessage
-        processAt time.Time
-    }{
-        {
-            newTaskMessage("send_email", map[string]interface{}{"subject": "hello"}),
-            time.Now().Add(15 * time.Minute),
-        },
-    }
-
-    for _, tc := range tests {
-        flushDB(t, r) // clean up db before each test case
-
-        desc := fmt.Sprintf("(*RDB).Schedule(%v, %v)", tc.msg, tc.processAt)
-        err := r.Schedule(tc.msg, tc.processAt)
-        if err != nil {
-            t.Errorf("%s = %v, want nil", desc, err)
-            continue
-        }
-
-        res := r.client.ZRangeWithScores(base.ScheduledQueue, 0, -1).Val()
-        if len(res) != 1 {
-            t.Errorf("%s inserted %d items to %q, want 1 items inserted", desc, len(res), base.ScheduledQueue)
-            continue
-        }
-        if res[0].Score != float64(tc.processAt.Unix()) {
-            t.Errorf("%s inserted an item with score %f, want %f", desc, res[0].Score, float64(tc.processAt.Unix()))
-            continue
-        }
-    }
-}
-
-func TestRetry(t *testing.T) {
-    r := setup(t)
-    t1 := newTaskMessage("send_email", map[string]interface{}{"subject": "Hola!"})
-    t2 := newTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/image.jpg"})
-    t3 := newTaskMessage("reindex", nil)
-    t1.Retried = 10
-    errMsg := "SMTP server is not responding"
-    t1AfterRetry := &base.TaskMessage{
-        ID: t1.ID,
-        Type: t1.Type,
-        Payload: t1.Payload,
-        Queue: t1.Queue,
-        Retry: t1.Retry,
-        Retried: t1.Retried + 1,
-        ErrorMsg: errMsg,
-    }
-    now := time.Now()
-
-    tests := []struct {
-        inProgress []*base.TaskMessage
-        retry []sortedSetEntry
-        msg *base.TaskMessage
-        processAt time.Time
-        errMsg string
-        wantInProgress []*base.TaskMessage
-        wantRetry []sortedSetEntry
-    }{
-        {
-            inProgress: []*base.TaskMessage{t1, t2},
-            retry: []sortedSetEntry{
-                {t3, now.Add(time.Minute).Unix()},
-            },
-            msg: t1,
-            processAt: now.Add(5 * time.Minute),
-            errMsg: errMsg,
-            wantInProgress: []*base.TaskMessage{t2},
-            wantRetry: []sortedSetEntry{
-                {t1AfterRetry, now.Add(5 * time.Minute).Unix()},
-                {t3, now.Add(time.Minute).Unix()},
-            },
-        },
-    }
-
-    for _, tc := range tests {
-        flushDB(t, r)
-        seedInProgressQueue(t, r, tc.inProgress)
-        seedRetryQueue(t, r, tc.retry)
-
-        err := r.Retry(tc.msg, tc.processAt, tc.errMsg)
-        if err != nil {
-            t.Errorf("(*RDB).Retry = %v, want nil", err)
-            continue
-        }
-
-        gotInProgressRaw := r.client.LRange(base.InProgressQueue, 0, -1).Val()
-        gotInProgress := mustUnmarshalSlice(t, gotInProgressRaw)
-        if diff := cmp.Diff(tc.wantInProgress, gotInProgress, sortMsgOpt); diff != "" {
-            t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.InProgressQueue, diff)
-        }
-
-        gotRetryRaw := r.client.ZRangeWithScores(base.RetryQueue, 0, -1).Val()
-        var gotRetry []sortedSetEntry
-        for _, z := range gotRetryRaw {
-            gotRetry = append(gotRetry, sortedSetEntry{
-                Msg: mustUnmarshal(t, z.Member.(string)),
-                Score: int64(z.Score),
-            })
-        }
-        cmpOpt := cmp.AllowUnexported(sortedSetEntry{})
-        if diff := cmp.Diff(tc.wantRetry, gotRetry, cmpOpt, sortZSetEntryOpt); diff != "" {
-            t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.RetryQueue, diff)
-        }
-
-        processedKey := base.ProcessedKey(time.Now())
-        gotProcessed := r.client.Get(processedKey).Val()
-        if gotProcessed != "1" {
-            t.Errorf("GET %q = %q, want 1", processedKey, gotProcessed)
-        }
-        gotTTL := r.client.TTL(processedKey).Val()
-        if gotTTL > statsTTL {
-            t.Errorf("TTL %q = %v, want less than or equal to %v", processedKey, gotTTL, statsTTL)
-        }
-
-        failureKey := base.FailureKey(time.Now())
-        gotFailure := r.client.Get(failureKey).Val()
-        if gotFailure != "1" {
-            t.Errorf("GET %q = %q, want 1", failureKey, gotFailure)
-        }
-        gotTTL = r.client.TTL(processedKey).Val()
-        if gotTTL > statsTTL {
-            t.Errorf("TTL %q = %v, want less than or equal to %v", failureKey, gotTTL, statsTTL)
-        }
-    }
-}