From d84e8c0ff26f5629b7f958296fc6e497d43366e9 Mon Sep 17 00:00:00 2001
From: Ken Hibino
Date: Sun, 15 Dec 2019 17:16:13 -0800
Subject: [PATCH] Modify (*RDB).Kill method to atomically move task from
 in_progress to dead queue

---
 internal/rdb/rdb.go      | 29 ++++++++++++-----
 internal/rdb/rdb_test.go | 69 +++++++++++++++++++++++++++++++++-------
 2 files changed, 78 insertions(+), 20 deletions(-)

diff --git a/internal/rdb/rdb.go b/internal/rdb/rdb.go
index fde76b9..9ee829d 100644
--- a/internal/rdb/rdb.go
+++ b/internal/rdb/rdb.go
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"strconv"
 	"time"
 
 	"github.com/go-redis/redis/v7"
@@ -165,9 +164,10 @@ func (r *RDB) schedule(zset string, processAt time.Time, msg *TaskMessage) error
 	return nil
 }
 
-// Kill sends the task to "dead" set.
+// Kill sends the task to "dead" queue from in-progress queue, assigning
+// the error message to the task.
 // It also trims the set by timestamp and set size.
-func (r *RDB) Kill(msg *TaskMessage) error {
+func (r *RDB) Kill(msg *TaskMessage, errMsg string) error {
 	const maxDeadTask = 10
 	const deadExpirationInDays = 90
 	bytes, err := json.Marshal(msg)
@@ -175,12 +175,25 @@ func (r *RDB) Kill(msg *TaskMessage) error {
 		return fmt.Errorf("could not marshal %+v to json: %v", msg, err)
 	}
 	now := time.Now()
-	pipe := r.client.Pipeline()
-	pipe.ZAdd(deadQ, &redis.Z{Member: string(bytes), Score: float64(now.Unix())})
 	limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
-	pipe.ZRemRangeByScore(deadQ, "-inf", strconv.Itoa(int(limit)))
-	pipe.ZRemRangeByRank(deadQ, 0, -maxDeadTask) // trim the set to 100
-	_, err = pipe.Exec()
+	// KEYS[1] -> asynq:in_progress
+	// KEYS[2] -> asynq:dead
+	// ARGV[1] -> TaskMessage value
+	// ARGV[2] -> error message
+	// ARGV[3] -> died_at UNIX timestamp
+	// ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
+	// ARGV[5] -> max number of tasks in dead queue (e.g., 100)
+	script := redis.NewScript(`
+	redis.call("LREM", KEYS[1], 0, ARGV[1])
+	local msg = cjson.decode(ARGV[1])
+	msg["ErrorMsg"] = ARGV[2]
+	redis.call("ZADD", KEYS[2], ARGV[3], cjson.encode(msg))
+	redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[4])
+	redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[5])
+	return redis.status_reply("OK")
+	`)
+	_, err = script.Run(r.client, []string{inProgressQ, deadQ},
+		string(bytes), errMsg, now.Unix(), limit, maxDeadTask).Result()
 	return err
 }
 
diff --git a/internal/rdb/rdb_test.go b/internal/rdb/rdb_test.go
index 3ee37f3..7717b7a 100644
--- a/internal/rdb/rdb_test.go
+++ b/internal/rdb/rdb_test.go
@@ -115,35 +115,80 @@ func TestDone(t *testing.T) {
 func TestKill(t *testing.T) {
 	r := setup(t)
 	t1 := newTaskMessage("send_email", nil)
+	t2 := newTaskMessage("reindex", nil)
+	t3 := newTaskMessage("generate_csv", nil)
+	errMsg := "SMTP server not responding"
+	t1AfterKill := &TaskMessage{
+		ID:       t1.ID,
+		Type:     t1.Type,
+		Payload:  t1.Payload,
+		Queue:    t1.Queue,
+		Retry:    t1.Retry,
+		Retried:  t1.Retried,
+		ErrorMsg: errMsg,
+	}
+	now := time.Now()
 
 	// TODO(hibiken): add test cases for trimming
 	tests := []struct {
-		dead     []sortedSetEntry // inital state of dead queue
-		target   *TaskMessage     // task to kill
-		wantDead []*TaskMessage   // final state of dead queue
+		inProgress     []*TaskMessage
+		dead           []sortedSetEntry
+		target         *TaskMessage // task to kill
+		wantInProgress []*TaskMessage
+		wantDead       []sortedSetEntry
 	}{
 		{
-			dead:     []sortedSetEntry{},
-			target:   t1,
-			wantDead: []*TaskMessage{t1},
+			inProgress: []*TaskMessage{t1, t2},
+			dead: []sortedSetEntry{
+				{t3, now.Add(-time.Hour).Unix()},
+			},
+			target:         t1,
+			wantInProgress: []*TaskMessage{t2},
+			wantDead: []sortedSetEntry{
+				{t1AfterKill, now.Unix()},
+				{t3, now.Add(-time.Hour).Unix()},
+			},
+		},
+		{
+			inProgress:     []*TaskMessage{t1, t2, t3},
+			dead:           []sortedSetEntry{},
+			target:         t1,
+			wantInProgress: []*TaskMessage{t2, t3},
+			wantDead: []sortedSetEntry{
+				{t1AfterKill, now.Unix()},
+			},
 		},
 	}
 
 	for _, tc := range tests {
 		flushDB(t, r) // clean up db before each test case
+		seedInProgressQueue(t, r, tc.inProgress)
 		seedDeadQueue(t, r, tc.dead)
 
-		err := r.Kill(tc.target)
+		err := r.Kill(tc.target, errMsg)
 		if err != nil {
-			t.Error(err)
+			t.Errorf("(*RDB).Kill(%v, %v) = %v, want nil", tc.target, errMsg, err)
 			continue
 		}
 
-		data := r.client.ZRange(deadQ, 0, -1).Val()
-		gotDead := mustUnmarshalSlice(t, data)
-		if diff := cmp.Diff(tc.wantDead, gotDead, sortMsgOpt); diff != "" {
+		gotInProgressRaw := r.client.LRange(inProgressQ, 0, -1).Val()
+		gotInProgress := mustUnmarshalSlice(t, gotInProgressRaw)
+		if diff := cmp.Diff(tc.wantInProgress, gotInProgress, sortMsgOpt); diff != "" {
+			t.Errorf("mismatch found in %q; (-want, +got)\n%s", inProgressQ, diff)
+		}
+
+		var gotDead []sortedSetEntry
+		data := r.client.ZRangeWithScores(deadQ, 0, -1).Val()
+		for _, z := range data {
+			gotDead = append(gotDead, sortedSetEntry{
+				msg:   mustUnmarshal(t, z.Member.(string)),
+				score: int64(z.Score),
+			})
+		}
+
+		cmpOpt := cmp.AllowUnexported(sortedSetEntry{})
+		if diff := cmp.Diff(tc.wantDead, gotDead, cmpOpt, sortZSetEntryOpt); diff != "" {
 			t.Errorf("mismatch found in %q after calling (*RDB).Kill: (-want, +got):\n%s", deadQ, diff)
-			continue
 		}
 	}
 }
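
Note: the core of this change is the Lua script. The previous implementation batched ZADD and the two trims in a pipeline, which only saves round trips and is not atomic, and it never removed the task from the in-progress queue at all; the script performs the LREM and the ZADD as one atomic Redis command, so no other client can observe the task in neither (or both) structures. The standalone sketch below shows the same script pattern with go-redis v7. It is for illustration only, not part of the patch: the key names ("example:in_progress", "example:dead") and the payload are made up.

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis/v7"
)

// moveToDead atomically removes rawMsg from the list at KEYS[1] and adds it
// to the sorted set at KEYS[2], scored by ARGV[2]. Because the script runs
// as a single Redis command, the move is all-or-nothing.
var moveToDead = redis.NewScript(`
redis.call("LREM", KEYS[1], 0, ARGV[1])
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[1])
return redis.status_reply("OK")
`)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Seed the "in progress" list with a fake task payload.
	rawMsg := `{"Type":"send_email"}`
	client.LPush("example:in_progress", rawMsg)

	// Run executes the script via EVALSHA, falling back to EVAL if the
	// script is not yet cached on the server.
	err := moveToDead.Run(client,
		[]string{"example:in_progress", "example:dead"},
		rawMsg, time.Now().Unix()).Err()
	if err != nil {
		panic(err)
	}

	fmt.Println(client.LRange("example:in_progress", 0, -1).Val()) // []
	fmt.Println(client.ZRange("example:dead", 0, -1).Val())        // [{"Type":"send_email"}]
}

The patch's actual script additionally rewrites the payload inside Redis (cjson.decode, set ErrorMsg, cjson.encode) and trims the dead set by score and by rank in the same atomic step.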