Mirror of https://github.com/hibiken/asynq.git (synced 2024-11-10 11:31:58 +08:00)
(fix): Clear unique lock when task is deleted via Inspector
Commit 68dd6d9a9d (parent 6cce31a134)
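The change below threads the uniqueness lock through task deletion: the task hash gains a unique_key field (written by the enqueue/schedule scripts and by the test seed helpers), and the delete scripts release that lock, but only while its value still matches the deleted task's ID. Spelled out as plain go-redis v7 calls, the release-if-owner step looks roughly like the sketch below; this is a readability aid with placeholder key names, not the committed code path, which performs the same check atomically inside the Lua scripts in the hunks that follow.

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

// releaseUniqueLock deletes the uniqueness lock recorded in a task hash, but
// only if the lock is still owned by that task (its value equals taskID).
// This mirrors the intent of deleteTaskCmd/deleteAllCmd below; unlike those
// Lua scripts, these separate calls are not atomic.
func releaseUniqueLock(c *redis.Client, taskKey, taskID string) error {
	uniqueKey, err := c.HGet(taskKey, "unique_key").Result()
	if err == redis.Nil || uniqueKey == "" {
		return nil // task was not enqueued with uniqueness
	}
	if err != nil {
		return err
	}
	if c.Get(uniqueKey).Val() == taskID {
		return c.Del(uniqueKey).Err()
	}
	return nil // lock has since been acquired by a newer task; leave it
}

func main() {
	c := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	// Placeholder task key and ID, purely for illustration.
	fmt.Println(releaseUniqueLock(c, "asynq:{default}:t:some-task-id", "some-task-id"))
}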
@@ -287,10 +287,17 @@ func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
			"state":      state.String(),
			"timeout":    msg.Timeout,
			"deadline":   msg.Deadline,
+			"unique_key": msg.UniqueKey,
		}
		if err := c.HSet(key, data).Err(); err != nil {
			tb.Fatal(err)
		}
+		if len(msg.UniqueKey) > 0 {
+			err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
+			if err != nil {
+				tb.Fatalf("Failed to set unique lock in redis: %v", err)
+			}
+		}
	}
}
@@ -310,10 +317,17 @@ func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
			"state":      state.String(),
			"timeout":    msg.Timeout,
			"deadline":   msg.Deadline,
+			"unique_key": msg.UniqueKey,
		}
		if err := c.HSet(key, data).Err(); err != nil {
			tb.Fatal(err)
		}
+		if len(msg.UniqueKey) > 0 {
+			err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
+			if err != nil {
+				tb.Fatalf("Failed to set unique lock in redis: %v", err)
+			}
+		}
	}
}
@@ -170,6 +170,7 @@ func SchedulerHistoryKey(entryID string) string {
}

// UniqueKey returns a redis key with the given type, payload, and queue name.
+// FIXME: We probably need to generate a hash of payload to make this key unique
func UniqueKey(qname, tasktype string, payload []byte) string {
	return fmt.Sprintf("%sunique:%s:%s", QueueKeyPrefix(qname), tasktype, string(payload))
}
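As the FIXME above notes, the key embeds the raw payload bytes, so large or binary payloads produce unwieldy keys. A minimal sketch of the hashed variant the comment hints at, with a stand-in for base.QueueKeyPrefix (neither function below is part of this commit):

package main

import (
	"crypto/sha256"
	"fmt"
)

// queueKeyPrefix stands in for base.QueueKeyPrefix; the exact prefix format
// shown here is illustrative.
func queueKeyPrefix(qname string) string {
	return fmt.Sprintf("asynq:{%s}:", qname)
}

// uniqueKeyWithHash is the hypothetical variant suggested by the FIXME:
// the payload is reduced to a SHA-256 digest instead of being embedded raw,
// keeping the key short and safe for arbitrary payload bytes.
func uniqueKeyWithHash(qname, tasktype string, payload []byte) string {
	sum := sha256.Sum256(payload)
	return fmt.Sprintf("%sunique:%s:%x", queueKeyPrefix(qname), tasktype, sum)
}

func main() {
	fmt.Println(uniqueKeyWithHash("default", "email", []byte(`{"user_id":123}`)))
}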
@@ -509,7 +509,7 @@ func (r *RDB) ListArchived(qname string, pgn Pagination) ([]base.Z, error) {
	if !r.client.SIsMember(base.AllQueues, qname).Val() {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
-	res, err := r.listZSetEntries(base.ArchivedKey(qname), qname, pgn)
+	zs, err := r.listZSetEntries(base.ArchivedKey(qname), qname, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
@@ -719,7 +719,7 @@ func (r *RDB) runAll(zset, qname string) (int64, error) {
	}
	res, err := runAllCmd.Run(r.client, keys, argv...).Result()
	if err != nil {
-		return err
+		return 0, err
	}
	n, ok := res.(int64)
	if !ok {
@@ -994,6 +994,10 @@ else
		return redis.error_reply("task is not found in zset: " .. tostring(state))
	end
end
+local unique_key = redis.call("HGET", KEYS[1], "unique_key")
+if unique_key ~= nil and unique_key ~= "" and redis.call("GET", unique_key) == ARGV[1] then
+	redis.call("DEL", unique_key)
+end
return redis.call("DEL", KEYS[1])
`)
@@ -1089,7 +1093,12 @@ func (r *RDB) DeleteAllScheduledTasks(qname string) (int64, error) {
var deleteAllCmd = redis.NewScript(`
local ids = redis.call("ZRANGE", KEYS[1], 0, -1)
for _, id in ipairs(ids) do
-	redis.call("DEL", ARGV[1] .. id)
+	local task_key = ARGV[1] .. id
+	local unique_key = redis.call("HGET", task_key, "unique_key")
+	if unique_key ~= nil and unique_key ~= "" and redis.call("GET", unique_key) == id then
+		redis.call("DEL", unique_key)
+	end
+	redis.call("DEL", task_key)
end
redis.call("DEL", KEYS[1])
return table.getn(ids)`)
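In the updated loop each member ID is turned back into its task key via the ARGV[1] prefix, the hash's unique_key field is read, and the lock is deleted only when its value is still that ID, so a lock already re-acquired by a newer task with the same unique key is left alone. Scripts defined with redis.NewScript are invoked the same way as runAllCmd in the hunk above; a rough sketch of the call shape in go-redis v7, with a stripped-down stand-in script and placeholder key names (the scheduled-set key and task-key prefix are examples, not values taken from the commit):

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

// A stripped-down stand-in with the same KEYS/ARGV layout as deleteAllCmd:
// KEYS[1] is the zset holding task IDs, ARGV[1] is the task-key prefix that,
// concatenated with an ID, yields the task hash key.
var deleteAllSketch = redis.NewScript(`
local ids = redis.call("ZRANGE", KEYS[1], 0, -1)
for _, id in ipairs(ids) do
	redis.call("DEL", ARGV[1] .. id)
end
redis.call("DEL", KEYS[1])
return table.getn(ids)`)

func main() {
	c := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	// Placeholder keys for a queue named "default".
	n, err := deleteAllSketch.Run(c, []string{"asynq:{default}:scheduled"}, "asynq:{default}:t:").Result()
	fmt.Println(n, err)
}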
@@ -5,6 +5,7 @@
package rdb

import (
+	"encoding/json"
	"fmt"
	"testing"
	"time"
@@ -3117,6 +3118,62 @@ func TestDeletePendingTask(t *testing.T) {
	}
}

+func TestDeleteTaskWithUniqueLock(t *testing.T) {
+	r := setup(t)
+	defer r.Close()
+	m1 := &base.TaskMessage{
+		ID:        uuid.New(),
+		Type:      "email",
+		Payload:   h.JSON(map[string]interface{}{"user_id": json.Number("123")}),
+		Queue:     base.DefaultQueueName,
+		UniqueKey: base.UniqueKey(base.DefaultQueueName, "email", h.JSON(map[string]interface{}{"user_id": 123})),
+	}
+	t1 := time.Now().Add(3 * time.Hour)
+
+	tests := []struct {
+		scheduled     map[string][]base.Z
+		qname         string
+		id            uuid.UUID
+		uniqueKey     string
+		wantScheduled map[string][]*base.TaskMessage
+	}{
+		{
+			scheduled: map[string][]base.Z{
+				"default": {
+					{Message: m1, Score: t1.Unix()},
+				},
+			},
+			qname:     "default",
+			id:        m1.ID,
+			uniqueKey: m1.UniqueKey,
+			wantScheduled: map[string][]*base.TaskMessage{
+				"default": {},
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		h.FlushDB(t, r.client) // clean up db before each test case
+		h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
+
+		if got := r.DeleteTask(tc.qname, tc.id); got != nil {
+			t.Errorf("r.DeleteTask(%q, %v) returned error: %v", tc.qname, tc.id, got)
+			continue
+		}
+
+		for qname, want := range tc.wantScheduled {
+			gotScheduled := h.GetScheduledMessages(t, r.client, qname)
+			if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
+				t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.ScheduledKey(qname), diff)
+			}
+		}
+
+		if r.client.Exists(tc.uniqueKey).Val() != 0 {
+			t.Errorf("Uniqueness lock %q still exists", tc.uniqueKey)
+		}
+	}
+}
+
func TestDeleteTaskError(t *testing.T) {
	r := setup(t)
	defer r.Close()
@@ -3305,6 +3362,7 @@ func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) {
		archived     map[string][]base.Z
		qname        string
		want         int64
+		uniqueKeys   []string // list of unique keys that should be cleared
		wantArchived map[string][]*base.TaskMessage
	}{
		{
@@ -3317,6 +3375,7 @@ func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) {
			},
			qname:      "default",
			want:       3,
+			uniqueKeys: []string{m1.UniqueKey, m2.UniqueKey},
			wantArchived: map[string][]*base.TaskMessage{
				"default": {},
			},
@@ -3326,18 +3385,6 @@ func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) {
	for _, tc := range tests {
		h.FlushDB(t, r.client) // clean up db before each test case
		h.SeedAllArchivedQueues(t, r.client, tc.archived)
-		var uniqueKeys []string // list of unique keys set in redis
-		for _, zs := range tc.archived {
-			for _, z := range zs {
-				if len(z.Message.UniqueKey) > 0 {
-					err := r.client.SetNX(z.Message.UniqueKey, z.Message.ID.String(), time.Minute).Err()
-					if err != nil {
-						t.Fatalf("Failed to set unique lock in redis: %v", err)
-					}
-					uniqueKeys = append(uniqueKeys, z.Message.UniqueKey)
-				}
-			}
-		}

		got, err := r.DeleteAllArchivedTasks(tc.qname)
		if err != nil {
@@ -3353,7 +3400,7 @@ func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) {
			}
		}

-		for _, uniqueKey := range uniqueKeys {
+		for _, uniqueKey := range tc.uniqueKeys {
			if r.client.Exists(uniqueKey).Val() != 0 {
				t.Errorf("Uniqueness lock %q still exists", uniqueKey)
			}
@@ -114,7 +114,8 @@ redis.call("HSET", KEYS[2],
           "msg", ARGV[3],
           "state", "pending",
           "timeout", ARGV[4],
-          "deadline", ARGV[5])
+          "deadline", ARGV[5],
+          "unique_key", KEYS[1])
redis.call("LPUSH", KEYS[3], ARGV[1])
return 1
`)
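Besides the uniqueness lock key itself (KEYS[1], acquired earlier in the script), enqueueUniqueCmd now also records that key in the task hash's unique_key field, which is what lets the deletion scripts above find and release the lock later. A quick way to inspect the stored field from Go, again with go-redis v7 calls and a placeholder task key:

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

func main() {
	c := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Placeholder task hash key; after this change its "unique_key" field
	// holds the lock key written by enqueueUniqueCmd/scheduleUniqueCmd.
	taskKey := "asynq:{default}:t:some-task-id"

	uniqueKey, err := c.HGet(taskKey, "unique_key").Result()
	if err != nil {
		fmt.Println("no unique_key field:", err)
		return
	}
	// The lock's value is the owning task's ID, which the delete scripts
	// compare against before releasing the lock.
	fmt.Println("lock key:", uniqueKey, "owner:", c.Get(uniqueKey).Val())
}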
@@ -407,7 +408,8 @@ redis.call("HSET", KEYS[2],
           "msg", ARGV[4],
           "state", "scheduled",
           "timeout", ARGV[5],
-          "deadline", ARGV[6])
+          "deadline", ARGV[6],
+          "unique_key", KEYS[1])
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1])
return 1
`)
@@ -191,7 +191,11 @@ func TestEnqueueUnique(t *testing.T) {
		}
		deadline := r.client.HGet(taskKey, "deadline").Val() // "deadline" field
		if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
-			t.Errorf("deadline field under task-ke is set to %v, want %v", deadline, want)
+			t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want)
		}
+		uniqueKey := r.client.HGet(taskKey, "unique_key").Val() // "unique_key" field
+		if uniqueKey != tc.msg.UniqueKey {
+			t.Errorf("uniqueue_key field under task key is set to %q, want %q", uniqueKey, tc.msg.UniqueKey)
+		}

		// Check queue is in the AllQueues set.
@@ -1008,7 +1012,11 @@ func TestScheduleUnique(t *testing.T) {
		}
		deadline := r.client.HGet(taskKey, "deadline").Val() // "deadline" field
		if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
-			t.Errorf("deadline field under task-ke is set to %v, want %v", deadline, want)
+			t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want)
		}
+		uniqueKey := r.client.HGet(taskKey, "unique_key").Val() // "unique_key" field
+		if uniqueKey != tc.msg.UniqueKey {
+			t.Errorf("uniqueue_key field under task key is set to %q, want %q", uniqueKey, tc.msg.UniqueKey)
+		}

		// Check queue is in the AllQueues set.