2
0
mirror of https://github.com/hibiken/asynq.git synced 2024-11-14 11:31:18 +08:00

Modify (*RDB).Kill method to atomically move task from in_progress to

dead queue
This commit is contained in:
Ken Hibino 2019-12-15 17:16:13 -08:00
parent 1b1662bb12
commit d84e8c0ff2
2 changed files with 78 additions and 20 deletions

View File

@ -5,7 +5,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"strconv"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v7"
@ -165,9 +164,10 @@ func (r *RDB) schedule(zset string, processAt time.Time, msg *TaskMessage) error
return nil return nil
} }
// Kill sends the task to "dead" set. // Kill sends the task to "dead" queue from in-progress queue, assigning
// the error message to the task.
// It also trims the set by timestamp and set size. // It also trims the set by timestamp and set size.
func (r *RDB) Kill(msg *TaskMessage) error { func (r *RDB) Kill(msg *TaskMessage, errMsg string) error {
const maxDeadTask = 10 const maxDeadTask = 10
const deadExpirationInDays = 90 const deadExpirationInDays = 90
bytes, err := json.Marshal(msg) bytes, err := json.Marshal(msg)
@ -175,12 +175,25 @@ func (r *RDB) Kill(msg *TaskMessage) error {
return fmt.Errorf("could not marshal %+v to json: %v", msg, err) return fmt.Errorf("could not marshal %+v to json: %v", msg, err)
} }
now := time.Now() now := time.Now()
pipe := r.client.Pipeline()
pipe.ZAdd(deadQ, &redis.Z{Member: string(bytes), Score: float64(now.Unix())})
limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
pipe.ZRemRangeByScore(deadQ, "-inf", strconv.Itoa(int(limit))) // KEYS[1] -> asynq:in_progress
pipe.ZRemRangeByRank(deadQ, 0, -maxDeadTask) // trim the set to 10 // KEYS[2] -> asynq:dead
_, err = pipe.Exec() // ARGV[1] -> TaskMessage value
// ARGV[2] -> error message
// ARGV[3] -> died_at UNIX timestamp
// ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
// ARGV[5] -> max number of tasks in dead queue (e.g., 10)
script := redis.NewScript(`
redis.call("LREM", KEYS[1], 0, ARGV[1])
local msg = cjson.decode(ARGV[1])
msg["ErrorMsg"] = ARGV[2]
redis.call("ZADD", KEYS[2], ARGV[3], cjson.encode(msg))
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[4])
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[5])
return redis.status_reply("OK")
`)
_, err = script.Run(r.client, []string{inProgressQ, deadQ},
string(bytes), errMsg, now.Unix(), limit, maxDeadTask).Result()
return err return err
} }

View File

@ -115,35 +115,80 @@ func TestDone(t *testing.T) {
func TestKill(t *testing.T) { func TestKill(t *testing.T) {
r := setup(t) r := setup(t)
t1 := newTaskMessage("send_email", nil) t1 := newTaskMessage("send_email", nil)
t2 := newTaskMessage("reindex", nil)
t3 := newTaskMessage("generate_csv", nil)
errMsg := "SMTP server not responding"
t1AfterKill := &TaskMessage{
ID: t1.ID,
Type: t1.Type,
Payload: t1.Payload,
Queue: t1.Queue,
Retry: t1.Retry,
Retried: t1.Retried,
ErrorMsg: errMsg,
}
now := time.Now()
// TODO(hibiken): add test cases for trimming // TODO(hibiken): add test cases for trimming
tests := []struct { tests := []struct {
dead []sortedSetEntry // initial state of dead queue inProgress []*TaskMessage
dead []sortedSetEntry
target *TaskMessage // task to kill target *TaskMessage // task to kill
wantDead []*TaskMessage // final state of dead queue wantInProgress []*TaskMessage
wantDead []sortedSetEntry
}{ }{
{ {
inProgress: []*TaskMessage{t1, t2},
dead: []sortedSetEntry{
{t3, now.Add(-time.Hour).Unix()},
},
target: t1,
wantInProgress: []*TaskMessage{t2},
wantDead: []sortedSetEntry{
{t1AfterKill, now.Unix()},
{t3, now.Add(-time.Hour).Unix()},
},
},
{
inProgress: []*TaskMessage{t1, t2, t3},
dead: []sortedSetEntry{}, dead: []sortedSetEntry{},
target: t1, target: t1,
wantDead: []*TaskMessage{t1}, wantInProgress: []*TaskMessage{t2, t3},
wantDead: []sortedSetEntry{
{t1AfterKill, now.Unix()},
},
}, },
} }
for _, tc := range tests { for _, tc := range tests {
flushDB(t, r) // clean up db before each test case flushDB(t, r) // clean up db before each test case
seedInProgressQueue(t, r, tc.inProgress)
seedDeadQueue(t, r, tc.dead) seedDeadQueue(t, r, tc.dead)
err := r.Kill(tc.target) err := r.Kill(tc.target, errMsg)
if err != nil { if err != nil {
t.Error(err) t.Errorf("(*RDB).Kill(%v, %v) = %v, want nil", tc.target, errMsg, err)
continue continue
} }
data := r.client.ZRange(deadQ, 0, -1).Val() gotInProgressRaw := r.client.LRange(inProgressQ, 0, -1).Val()
gotDead := mustUnmarshalSlice(t, data) gotInProgress := mustUnmarshalSlice(t, gotInProgressRaw)
if diff := cmp.Diff(tc.wantDead, gotDead, sortMsgOpt); diff != "" { if diff := cmp.Diff(tc.wantInProgress, gotInProgress, sortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q; (-want, +got)\n%s", inProgressQ, diff)
}
var gotDead []sortedSetEntry
data := r.client.ZRangeWithScores(deadQ, 0, -1).Val()
for _, z := range data {
gotDead = append(gotDead, sortedSetEntry{
msg: mustUnmarshal(t, z.Member.(string)),
score: int64(z.Score),
})
}
cmpOpt := cmp.AllowUnexported(sortedSetEntry{})
if diff := cmp.Diff(tc.wantDead, gotDead, cmpOpt, sortZSetEntryOpt); diff != "" {
t.Errorf("mismatch found in %q after calling (*RDB).Kill: (-want, +got):\n%s", deadQ, diff) t.Errorf("mismatch found in %q after calling (*RDB).Kill: (-want, +got):\n%s", deadQ, diff)
continue
} }
} }
} }