
Update WriteServerState and ClearServerState in RDB

Ken Hibino 2020-08-10 21:49:12 -07:00
parent 4b81b91d3e
commit becd26479b
2 changed files with 57 additions and 53 deletions


@@ -309,7 +309,7 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
 // KEYS[2] -> asynq:{<qname>}:deadlines
 // KEYS[3] -> asynq:{<qname>}:retry
 // KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
-// KEYS[5] -> asynq:{<qname>}:failure:<yyyy-mm-dd>
+// KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
 // ARGV[1] -> base.TaskMessage value to remove from base.InProgressQueue queue
 // ARGV[2] -> base.TaskMessage value to add to Retry queue
 // ARGV[3] -> retry_at UNIX timestamp
@@ -364,7 +364,7 @@ const (
 // KEYS[2] -> asynq:{<qname>}:deadlines
 // KEYS[3] -> asynq:{<qname>}:dead
 // KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
-// KEYS[5] -> asynq:{<qname>}:failure:<yyyy-mm-dd>
+// KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
 // ARGV[1] -> base.TaskMessage value to remove from base.InProgressQueue queue
 // ARGV[2] -> base.TaskMessage value to add to Dead queue
 // ARGV[3] -> died_at UNIX timestamp
@@ -408,10 +408,10 @@ func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
 	now := time.Now()
 	limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
 	processedKey := base.ProcessedKey(msg.Queue, now)
-	failureKey := base.FailureKey(msg.Queue, now)
+	failedKey := base.FailedKey(msg.Queue, now)
 	expireAt := now.Add(statsTTL)
 	return killCmd.Run(r.client,
-		[]string{base.InProgressKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.DeadKey(msg.Queue), processedKey, failureKey},
+		[]string{base.InProgressKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.DeadKey(msg.Queue), processedKey, failedKey},
 		msgToRemove, msgToAdd, now.Unix(), limit, maxDeadTasks, expireAt.Unix()).Err()
 }
@@ -454,7 +454,7 @@ func (r *RDB) forward(src, dst string) (int, error) {
 // forwardAll moves tasks with a score less than the current unix time from the src zset,
 // until there's no more tasks.
-func (r *RDB) forwardAll(src, dst string) error {
+func (r *RDB) forwardAll(src, dst string) (err error) {
 	n := 1
 	for n != 0 {
 		n, err = r.forward(src, dst)
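The named return value introduced here gives the bare assignment n, err = r.forward(src, dst) inside the loop an err variable to assign to, with no separate var declaration. A self-contained sketch of the idiom; forwardAllPattern and step are illustrative names, not asynq code:

package example

// forwardAllPattern mirrors the named-return idiom from the hunk above:
// err is declared by the function signature, so the loop body can assign
// to it with plain `=` instead of `:=`.
func forwardAllPattern(step func() (int, error)) (err error) {
	n := 1
	for n != 0 {
		n, err = step()
		if err != nil {
			return err
		}
	}
	return nil
}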
@@ -488,25 +488,20 @@ func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*bas
 	return msgs, nil
 }
 
-// KEYS[1] -> asynq:servers:<host:pid:sid>
-// KEYS[2] -> asynq:servers
-// KEYS[3] -> asynq:workers<host:pid:sid>
-// KEYS[4] -> asynq:workers
-// ARGV[1] -> expiration time
-// ARGV[2] -> TTL in seconds
-// ARGV[3] -> server info
-// ARGV[4:] -> alternate key-value pair of (worker id, worker data)
+// KEYS[1] -> asynq:servers:{<host:pid:sid>}
+// KEYS[2] -> asynq:workers:{<host:pid:sid>}
+// ARGV[1] -> TTL in seconds
+// ARGV[2] -> server info
+// ARGV[3:] -> alternate key-value pair of (worker id, worker data)
 // Note: Add key to ZSET with expiration time as score.
 // ref: https://github.com/antirez/redis/issues/135#issuecomment-2361996
 var writeServerStateCmd = redis.NewScript(`
-redis.call("SETEX", KEYS[1], ARGV[2], ARGV[3])
-redis.call("ZADD", KEYS[2], ARGV[1], KEYS[1])
-redis.call("DEL", KEYS[3])
-for i = 4, table.getn(ARGV)-1, 2 do
-	redis.call("HSET", KEYS[3], ARGV[i], ARGV[i+1])
+redis.call("SETEX", KEYS[1], ARGV[1], ARGV[2])
+redis.call("DEL", KEYS[2])
+for i = 3, table.getn(ARGV)-1, 2 do
+	redis.call("HSET", KEYS[2], ARGV[i], ARGV[i+1])
 end
-redis.call("EXPIRE", KEYS[3], ARGV[2])
-redis.call("ZADD", KEYS[4], ARGV[1], KEYS[3])
+redis.call("EXPIRE", KEYS[2], ARGV[1])
 return redis.status_reply("OK")`)
 
 // WriteServerState writes server state data to redis with expiration set to the value ttl.
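The "expiration time as score" note refers to a well-known Redis pattern (see the linked issue comment): sorted-set members cannot carry per-member TTLs, so the expiration timestamp is stored as the member's score and stale members are later trimmed by score range. A minimal sketch of the cleanup side, assuming the go-redis v7 client used in the diff; purgeExpiredServers is a hypothetical helper, not asynq's actual cleanup code:

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis/v7"
)

// purgeExpiredServers removes index entries whose expiration time, stored
// as the ZSET score, is already in the past.
func purgeExpiredServers(client *redis.Client) error {
	now := time.Now().Unix()
	return client.ZRemRangeByScore("asynq:servers", "-inf", fmt.Sprintf("%d", now)).Err()
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	fmt.Println(purgeExpiredServers(client))
}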
@@ -516,7 +511,7 @@ func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo
 		return err
 	}
 	exp := time.Now().Add(ttl).UTC()
-	args := []interface{}{float64(exp.Unix()), ttl.Seconds(), bytes} // args to the lua script
+	args := []interface{}{ttl.Seconds(), bytes} // args to the lua script
 	for _, w := range workers {
 		bytes, err := json.Marshal(w)
 		if err != nil {
@@ -526,28 +521,33 @@ func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo
 	}
 	skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
 	wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
-	return writeServerStateCmd.Run(r.client,
-		[]string{skey, base.AllServers, wkey, base.AllWorkers},
-		args...).Err()
+	if err := r.client.ZAdd(base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
+		return err
+	}
+	if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
+		return err
+	}
+	return writeServerStateCmd.Run(r.client, []string{skey, wkey}, args...).Err()
 }
 
-// KEYS[1] -> asynq:servers
-// KEYS[2] -> asynq:servers:<host:pid:sid>
-// KEYS[3] -> asynq:workers
-// KEYS[4] -> asynq:workers<host:pid:sid>
+// KEYS[1] -> asynq:servers:{<host:pid:sid>}
+// KEYS[2] -> asynq:workers:{<host:pid:sid>}
 var clearServerStateCmd = redis.NewScript(`
-redis.call("ZREM", KEYS[1], KEYS[2])
+redis.call("DEL", KEYS[1])
 redis.call("DEL", KEYS[2])
-redis.call("ZREM", KEYS[3], KEYS[4])
-redis.call("DEL", KEYS[4])
 return redis.status_reply("OK")`)
 
 // ClearServerState deletes server state data from redis.
 func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
 	skey := base.ServerInfoKey(host, pid, serverID)
 	wkey := base.WorkersKey(host, pid, serverID)
-	return clearServerStateCmd.Run(r.client,
-		[]string{base.AllServers, skey, base.AllWorkers, wkey}).Err()
+	if err := r.client.ZRem(base.AllServers, skey).Err(); err != nil {
+		return err
+	}
+	if err := r.client.ZRem(base.AllWorkers, wkey).Err(); err != nil {
+		return err
+	}
+	return clearServerStateCmd.Run(r.client, []string{skey, wkey}).Err()
 }
 
 // CancelationPubSub returns a pubsub for cancelation messages.
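A plausible reading of this refactor: the new key names carry Redis Cluster hash tags ({<host:pid:sid>}), and a Lua script can only safely touch keys that hash to the same slot, so the cross-server index sets asynq:servers and asynq:workers are now maintained with plain ZADD/ZREM calls outside the script. A runnable sketch of the resulting write path, assuming go-redis v7 signatures matching the diff; the server id and JSON payloads are made-up placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis/v7"
)

// writeState mirrors the script above: store server info with a TTL, then
// rebuild the per-server workers hash and give it the same TTL.
var writeState = redis.NewScript(`
redis.call("SETEX", KEYS[1], ARGV[1], ARGV[2])
redis.call("DEL", KEYS[2])
for i = 3, table.getn(ARGV)-1, 2 do
	redis.call("HSET", KEYS[2], ARGV[i], ARGV[i+1])
end
redis.call("EXPIRE", KEYS[2], ARGV[1])
return redis.status_reply("OK")`)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	ttl := 10 * time.Second
	exp := time.Now().Add(ttl)

	// Both keys share the {host:pid:sid} hash tag, so the script only sees
	// keys from a single cluster slot.
	skey := "asynq:servers:{localhost:1234:abc123}"
	wkey := "asynq:workers:{localhost:1234:abc123}"

	// Index entries store the expiration time as the score (see the note
	// above) and are updated outside the Lua script.
	if err := client.ZAdd("asynq:servers", &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
		panic(err)
	}
	if err := client.ZAdd("asynq:workers", &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
		panic(err)
	}

	// ARGV: TTL in seconds, server info, then worker-id/worker-data pairs.
	err := writeState.Run(client, []string{skey, wkey},
		ttl.Seconds(), `{"status":"running"}`, "worker1", `{"type":"email"}`).Err()
	fmt.Println(err)
}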


@@ -267,7 +267,7 @@ func TestDequeue(t *testing.T) {
 	for _, tc := range tests {
 		h.FlushDB(t, r.client) // clean up db before each test case
-		h.SeedAllEnqueuedQueues(t, r.client, msgs, queue, tc.enqueued)
+		h.SeedAllEnqueuedQueues(t, r.client, tc.enqueued)
 
 		gotMsg, gotDeadline, err := r.Dequeue(tc.args...)
 		if err != tc.err {
@@ -438,6 +438,7 @@ func TestDone(t *testing.T) {
 	t3Deadline := now.Unix() + t3.Deadline
 
 	tests := []struct {
+		desc           string
 		inProgress     map[string][]*base.TaskMessage // initial state of the in-progress list
 		deadlines      map[string][]base.Z            // initial state of deadlines set
 		target         *base.TaskMessage              // task to remove
@@ -445,6 +446,7 @@
 		wantDeadlines  map[string][]base.Z            // final state of the deadline set
 	}{
 		{
+			desc: "removes message from the correct queue",
 			inProgress: map[string][]*base.TaskMessage{
 				"default": {t1},
 				"custom":  {t2},
@@ -455,8 +457,8 @@
 			},
 			target: t1,
 			wantInProgress: map[string][]*base.TaskMessage{
-				"default": {t2},
-				"custom":  {},
+				"default": {},
+				"custom":  {t2},
 			},
 			wantDeadlines: map[string][]base.Z{
 				"default": {},
@@ -464,6 +466,7 @@
 			},
 		},
 		{
+			desc: "with one queue",
 			inProgress: map[string][]*base.TaskMessage{
 				"default": {t1},
 			},
@@ -479,6 +482,7 @@
 			},
 		},
 		{
+			desc: "with multiple messages in a queue",
 			inProgress: map[string][]*base.TaskMessage{
 				"default": {t1, t3},
 				"custom":  {t2},
@@ -489,7 +493,7 @@
 			},
 			target: t3,
 			wantInProgress: map[string][]*base.TaskMessage{
-				"defualt": {t1},
+				"default": {t1},
 				"custom":  {t2},
 			},
 			wantDeadlines: map[string][]base.Z{
@@ -517,21 +521,21 @@
 		err := r.Done(tc.target)
 		if err != nil {
-			t.Errorf("(*RDB).Done(task) = %v, want nil", err)
+			t.Errorf("%s; (*RDB).Done(task) = %v, want nil", tc.desc, err)
 			continue
 		}
 
 		for queue, want := range tc.wantInProgress {
 			gotInProgress := h.GetInProgressMessages(t, r.client, queue)
 			if diff := cmp.Diff(want, gotInProgress, h.SortMsgOpt); diff != "" {
-				t.Errorf("mismatch found in %q: (-want, +got):\n%s", base.InProgressKey(queue), diff)
+				t.Errorf("%s; mismatch found in %q: (-want, +got):\n%s", tc.desc, base.InProgressKey(queue), diff)
 				continue
 			}
 		}
 
 		for queue, want := range tc.wantDeadlines {
 			gotDeadlines := h.GetDeadlinesEntries(t, r.client, queue)
 			if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
-				t.Errorf("mismatch found in %q: (-want, +got):\n%s", base.DeadlinesKey(queue), diff)
+				t.Errorf("%s; mismatch found in %q: (-want, +got):\n%s", tc.desc, base.DeadlinesKey(queue), diff)
 				continue
 			}
 		}
@@ -539,16 +543,16 @@
 		processedKey := base.ProcessedKey(tc.target.Queue, time.Now())
 		gotProcessed := r.client.Get(processedKey).Val()
 		if gotProcessed != "1" {
-			t.Errorf("GET %q = %q, want 1", processedKey, gotProcessed)
+			t.Errorf("%s; GET %q = %q, want 1", tc.desc, processedKey, gotProcessed)
 		}
 
 		gotTTL := r.client.TTL(processedKey).Val()
 		if gotTTL > statsTTL {
-			t.Errorf("TTL %q = %v, want less than or equal to %v", processedKey, gotTTL, statsTTL)
+			t.Errorf("%s; TTL %q = %v, want less than or equal to %v", tc.desc, processedKey, gotTTL, statsTTL)
 		}
 
 		if len(tc.target.UniqueKey) > 0 && r.client.Exists(tc.target.UniqueKey).Val() != 0 {
-			t.Errorf("Uniqueness lock %q still exists", tc.target.UniqueKey)
+			t.Errorf("%s; Uniqueness lock %q still exists", tc.desc, tc.target.UniqueKey)
 		}
 	}
 }
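The desc field threaded through these assertions is the standard Go table-driven-test idiom: each case carries a label that prefixes every failure message, so the failing subcase is identifiable at a glance. A generic, self-contained sketch; the test and its names are illustrative, not from asynq:

package example

import "testing"

func TestSum(t *testing.T) {
	tests := []struct {
		desc string
		a, b int
		want int
	}{
		{desc: "both positive", a: 1, b: 2, want: 3},
		{desc: "with a negative", a: -1, b: 2, want: 1},
	}
	for _, tc := range tests {
		if got := tc.a + tc.b; got != tc.want {
			// Prefix failures with tc.desc so the failing case is obvious.
			t.Errorf("%s; sum(%d, %d) = %d, want %d", tc.desc, tc.a, tc.b, got, tc.want)
		}
	}
}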
@@ -597,7 +601,7 @@ func TestRequeue(t *testing.T) {
 			inProgress: map[string][]*base.TaskMessage{
 				"default": {t1, t2},
 			},
-			deadlines: []base.Z{
+			deadlines: map[string][]base.Z{
 				"default": {
 					{Message: t1, Score: t1Deadline},
 					{Message: t2, Score: t2Deadline},
@@ -610,8 +614,8 @@
 			wantInProgress: map[string][]*base.TaskMessage{
 				"default": {t2},
 			},
-			wantDeadlines: []base.Z{
-				"defult": {
+			wantDeadlines: map[string][]base.Z{
+				"default": {
 					{Message: t2, Score: t2Deadline},
 				},
 			},
@@ -649,8 +653,8 @@
 				"critical": {t3},
 			},
 			deadlines: map[string][]base.Z{
-				"defualt":  {{Message: t2, Score: t2Deadline}},
-				"critial":  {{Message: t3, Score: t3Deadline}},
+				"default":  {{Message: t2, Score: t2Deadline}},
+				"critical": {{Message: t3, Score: t3Deadline}},
 			},
 			target: t3,
 			wantEnqueued: map[string][]*base.TaskMessage{
@@ -670,7 +674,7 @@
 	for _, tc := range tests {
 		h.FlushDB(t, r.client) // clean up db before each test case
-		h.SeedAllEnqueuedQueues(t, r.client, msgs, tc.enqueued)
+		h.SeedAllEnqueuedQueues(t, r.client, tc.enqueued)
 		h.SeedAllInProgressQueues(t, r.client, tc.inProgress)
 		h.SeedAllDeadlines(t, r.client, tc.deadlines)
@@ -694,7 +698,7 @@
 		}
 		for qname, want := range tc.wantDeadlines {
 			gotDeadlines := h.GetDeadlinesEntries(t, r.client, qname)
-			if diff := cmp.Diff(wnt, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
+			if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
 				t.Errorf("mismatch found in %q: (-want, +got):\n%s", base.DeadlinesKey(qname), diff)
 			}
 		}
@@ -1028,7 +1032,7 @@ func TestKill(t *testing.T) {
 			wantInProgress: map[string][]*base.TaskMessage{
 				"default": {t2},
 			},
-			wantDeadlines: map[string]base.Z{
+			wantDeadlines: map[string][]base.Z{
 				"default": {{Message: t2, Score: t2Deadline}},
 			},
 			wantDead: map[string][]base.Z{
@@ -1090,7 +1094,7 @@
 				"default": {t1},
 				"custom":  {},
 			},
-			wantDeadlines: map[string]base.Z{
+			wantDeadlines: map[string][]base.Z{
 				"default": {{Message: t1, Score: t1Deadline}},
 				"custom":  {},
 			},
@@ -1354,7 +1358,7 @@ func TestListDeadlineExceeded(t *testing.T) {
 			},
 			qnames: []string{"default", "critical"},
 			t:      time.Now(),
-			want:   []*base.TaskMessage{t1, t2},
+			want:   []*base.TaskMessage{t1, t3},
 		},
 		{
 			desc: "with empty in-progress queue",