Update RDB.ForwardIfReady to forward to group if groupKey is specified

Repository: https://github.com/hibiken/asynq
parent: f17c157b0f
commit: 61854ea1dc
@@ -205,9 +205,14 @@ func UniqueKey(qname, tasktype string, payload []byte) string {
 	return fmt.Sprintf("%sunique:%s:%s", QueueKeyPrefix(qname), tasktype, hex.EncodeToString(checksum[:]))
 }
 
+// GroupKeyPrefix returns a prefix for group key.
+func GroupKeyPrefix(qname string) string {
+	return fmt.Sprintf("%sg:", QueueKeyPrefix(qname))
+}
+
 // GroupKey returns a redis key used to group tasks belong in the same group.
 func GroupKey(qname, gkey string) string {
-	return fmt.Sprintf("%sg:%s", QueueKeyPrefix(qname), gkey)
+	return fmt.Sprintf("%s%s", GroupKeyPrefix(qname), gkey)
 }
 
 // AllGroups return a redis key used to store all group keys used in a given queue.
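A quick sanity check of what the new helpers produce. The two group helpers below are copied from the diff; queueKeyPrefix is an assumption standing in for base.QueueKeyPrefix, which elsewhere in this package yields "asynq:{<qname>}:".

package main

import "fmt"

// Assumption: stands in for base.QueueKeyPrefix ("asynq:{<qname>}:").
func queueKeyPrefix(qname string) string { return fmt.Sprintf("asynq:{%s}:", qname) }

// Copied from the diff above.
func groupKeyPrefix(qname string) string { return fmt.Sprintf("%sg:", queueKeyPrefix(qname)) }
func groupKey(qname, gkey string) string { return fmt.Sprintf("%s%s", groupKeyPrefix(qname), gkey) }

func main() {
	fmt.Println(groupKeyPrefix("default"))            // asynq:{default}:g:
	fmt.Println(groupKey("default", "notifications")) // asynq:{default}:g:notifications
}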
@@ -515,7 +515,8 @@ if redis.call("EXISTS", KEYS[1]) == 1 then
 end
 redis.call("HSET", KEYS[1],
            "msg", ARGV[1],
-           "state", "aggregating")
+           "state", "aggregating",
+           "group", ARGV[4])
 redis.call("ZADD", KEYS[2], ARGV[3], ARGV[2])
 redis.call("SADD", KEYS[3], ARGV[4])
 return 1
@@ -576,7 +577,8 @@ if redis.call("EXISTS", KEYS[1]) == 1 then
 end
 redis.call("HSET", KEYS[1],
            "msg", ARGV[1],
-           "state", "aggregating")
+           "state", "aggregating",
+           "group", ARGV[4])
 redis.call("ZADD", KEYS[2], ARGV[3], ARGV[2])
 redis.call("SADD", KEYS[3], ARGV[4])
 return 1
@@ -916,24 +918,41 @@ func (r *RDB) ForwardIfReady(qnames ...string) error {
 // ARGV[1] -> current unix time in seconds
 // ARGV[2] -> task key prefix
 // ARGV[3] -> current unix time in nsec
+// ARGV[4] -> group key prefix
 // Note: Script moves tasks up to 100 at a time to keep the runtime of script short.
 var forwardCmd = redis.NewScript(`
 local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, 100)
 for _, id in ipairs(ids) do
-	redis.call("LPUSH", KEYS[2], id)
-	redis.call("ZREM", KEYS[1], id)
-	redis.call("HSET", ARGV[2] .. id,
-	           "state", "pending",
-	           "pending_since", ARGV[3])
+	local taskKey = ARGV[2] .. id
+	local group = redis.call("HGET", taskKey, "group")
+	if group then
+		redis.call("ZADD", ARGV[4] .. group, ARGV[1], id)
+		redis.call("ZREM", KEYS[1], id)
+		redis.call("HSET", taskKey,
+		           "state", "aggregating")
+	else
+		redis.call("LPUSH", KEYS[2], id)
+		redis.call("ZREM", KEYS[1], id)
+		redis.call("HSET", taskKey,
+		           "state", "pending",
+		           "pending_since", ARGV[3])
+	end
 end
 return table.getn(ids)`)
 
-// forward moves tasks with a score less than the current unix time
-// from the src zset to the dst list. It returns the number of tasks moved.
-func (r *RDB) forward(src, dst, taskKeyPrefix string) (int, error) {
+// forward moves tasks with a score less than the current unix time from the delayed (i.e. scheduled | retry) zset
+// to the pending list or group set.
+// It returns the number of tasks moved.
+func (r *RDB) forward(delayedKey, pendingKey, taskKeyPrefix, groupKeyPrefix string) (int, error) {
 	now := r.clock.Now()
-	res, err := forwardCmd.Run(context.Background(), r.client,
-		[]string{src, dst}, now.Unix(), taskKeyPrefix, now.UnixNano()).Result()
+	keys := []string{delayedKey, pendingKey}
+	argv := []interface{}{
+		now.Unix(),
+		taskKeyPrefix,
+		now.UnixNano(),
+		groupKeyPrefix,
+	}
+	res, err := forwardCmd.Run(context.Background(), r.client, keys, argv...).Result()
 	if err != nil {
 		return 0, errors.E(errors.Internal, fmt.Sprintf("redis eval error: %v", err))
 	}
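For orientation, here is how the script's KEYS and ARGV line up after this change when forwardAll drives it for the "default" queue. This is a standalone sketch, not code from the commit; the literal key strings are assumptions based on the package's usual "asynq:{<qname>}:" key layout.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed value of base.QueueKeyPrefix("default"); treat as illustrative.
	const prefix = "asynq:{default}:"
	now := time.Now()

	keys := []string{
		prefix + "scheduled", // KEYS[1]: delayed zset (the retry zset on the second pass)
		prefix + "pending",   // KEYS[2]: destination list for ungrouped tasks
	}
	argv := []interface{}{
		now.Unix(),     // ARGV[1]: score cutoff; also the score written into the group zset
		prefix + "t:",  // ARGV[2]: task key prefix
		now.UnixNano(), // ARGV[3]: pending_since value for ungrouped tasks
		prefix + "g:",  // ARGV[4]: group key prefix
	}
	fmt.Println(keys, argv)

	// A ready task whose hash holds group = "notifications" is moved to
	// "asynq:{default}:g:notifications" with score ARGV[1]; tasks without a
	// group are LPUSHed onto KEYS[2] exactly as before this change.
}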
@@ -945,15 +964,16 @@ func (r *RDB) forward(src, dst, taskKeyPrefix string) (int, error) {
 }
 
 // forwardAll checks for tasks in scheduled/retry state that are ready to be run, and updates
-// their state to "pending".
+// their state to "pending" or "aggregating".
 func (r *RDB) forwardAll(qname string) (err error) {
-	sources := []string{base.ScheduledKey(qname), base.RetryKey(qname)}
-	dst := base.PendingKey(qname)
+	delayedKeys := []string{base.ScheduledKey(qname), base.RetryKey(qname)}
+	pendingKey := base.PendingKey(qname)
 	taskKeyPrefix := base.TaskKeyPrefix(qname)
-	for _, src := range sources {
+	groupKeyPrefix := base.GroupKeyPrefix(qname)
+	for _, delayedKey := range delayedKeys {
 		n := 1
 		for n != 0 {
-			n, err = r.forward(src, dst, taskKeyPrefix)
+			n, err = r.forward(delayedKey, pendingKey, taskKeyPrefix, groupKeyPrefix)
 			if err != nil {
 				return err
 			}
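The exported entry point ForwardIfReady (named in the hunk header above) is not part of this diff; presumably it just fans out to forwardAll for each queue, roughly as sketched below. The loop shape and error handling are assumptions, not the committed code.

// Sketch of the caller, for context only; the real method may wrap errors differently.
func (r *RDB) ForwardIfReady(qnames ...string) error {
	for _, qname := range qnames {
		if err := r.forwardAll(qname); err != nil {
			return err
		}
	}
	return nil
}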
@@ -1222,6 +1222,10 @@ func TestAddToGroup(t *testing.T) {
 		if want := "aggregating"; state != want {
 			t.Errorf("state field under task-key is set to %q, want %q", state, want)
 		}
+		group := r.client.HGet(ctx, taskKey, "group").Val() // "group" field
+		if want := tc.groupKey; group != want {
+			t.Errorf("group field under task-key is set to %q, want %q", group, want)
+		}
 
 		// Check queue is in the AllQueues set.
 		if !r.client.SIsMember(context.Background(), base.AllQueues, tc.msg.Queue).Val() {
@@ -1329,6 +1333,10 @@ func TestAddToGroupUnique(t *testing.T) {
 		if want := "aggregating"; state != want {
 			t.Errorf("state field under task-key is set to %q, want %q", state, want)
 		}
+		group := r.client.HGet(ctx, taskKey, "group").Val() // "group" field
+		if want := tc.groupKey; group != want {
+			t.Errorf("group field under task-key is set to %q, want %q", group, want)
+		}
 
 		// Check queue is in the AllQueues set.
 		if !r.client.SIsMember(context.Background(), base.AllQueues, tc.msg.Queue).Val() {
@@ -2166,6 +2174,149 @@ func TestArchive(t *testing.T) {
 	}
 }
 
+func TestForwardIfReadyWithGroup(t *testing.T) {
+	r := setup(t)
+	defer r.Close()
+
+	now := time.Now()
+	r.SetClock(timeutil.NewSimulatedClock(now))
+	ctx := context.Background()
+	t1 := h.NewTaskMessage("send_email", nil)
+	t2 := h.NewTaskMessage("generate_csv", nil)
+	t3 := h.NewTaskMessage("gen_thumbnail", nil)
+	t4 := h.NewTaskMessageWithQueue("important_task", nil, "critical")
+	t5 := h.NewTaskMessageWithQueue("minor_task", nil, "low")
+	secondAgo := now.Add(-time.Second)
+
+	tests := []struct {
+		scheduled     map[string][]base.Z
+		retry         map[string][]base.Z
+		qnames        []string
+		wantPending   map[string][]*base.TaskMessage
+		wantScheduled map[string][]*base.TaskMessage
+		wantRetry     map[string][]*base.TaskMessage
+		wantGroup     map[string]map[string][]base.Z
+	}{
+		{
+			scheduled: map[string][]base.Z{
+				"default": {
+					{Message: t1, Score: secondAgo.Unix()},
+					{Message: t2, Score: secondAgo.Unix()},
+				},
+			},
+			retry: map[string][]base.Z{
+				"default": {{Message: t3, Score: secondAgo.Unix()}},
+			},
+			qnames: []string{"default"},
+			wantPending: map[string][]*base.TaskMessage{
+				"default": {t3},
+			},
+			wantScheduled: map[string][]*base.TaskMessage{
+				"default": {},
+			},
+			wantRetry: map[string][]*base.TaskMessage{
+				"default": {},
+			},
+			wantGroup: map[string]map[string][]base.Z{
+				"default": {
+					"notifications": {{Message: t1, Score: now.Unix()}},
+					"csv":           {{Message: t2, Score: now.Unix()}},
+				},
+			},
+		},
+		{
+			scheduled: map[string][]base.Z{
+				"default":  {{Message: t1, Score: secondAgo.Unix()}},
+				"critical": {{Message: t4, Score: secondAgo.Unix()}},
+				"low":      {},
+			},
+			retry: map[string][]base.Z{
+				"default":  {},
+				"critical": {},
+				"low":      {{Message: t5, Score: secondAgo.Unix()}},
+			},
+			qnames: []string{"default", "critical", "low"},
+			wantPending: map[string][]*base.TaskMessage{
+				"default":  {},
+				"critical": {},
+				"low":      {},
+			},
+			wantScheduled: map[string][]*base.TaskMessage{
+				"default":  {},
+				"critical": {},
+				"low":      {},
+			},
+			wantRetry: map[string][]*base.TaskMessage{
+				"default":  {},
+				"critical": {},
+				"low":      {},
+			},
+			wantGroup: map[string]map[string][]base.Z{
+				"default": {
+					"notifications": {{Message: t1, Score: now.Unix()}},
+				},
+				"critical": {
+					"critical_task_group": {{Message: t4, Score: now.Unix()}},
+				},
+				"low": {
+					"minor_task_group": {{Message: t5, Score: now.Unix()}},
+				},
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		h.FlushDB(t, r.client) // clean up db before each test case
+		h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
+		h.SeedAllRetryQueues(t, r.client, tc.retry)
+		// Set "group" field under the task key.
+		r.client.HSet(ctx, base.TaskKey(t1.Queue, t1.ID), "group", "notifications")
+		r.client.HSet(ctx, base.TaskKey(t2.Queue, t2.ID), "group", "csv")
+		r.client.HSet(ctx, base.TaskKey(t4.Queue, t4.ID), "group", "critical_task_group")
+		r.client.HSet(ctx, base.TaskKey(t5.Queue, t5.ID), "group", "minor_task_group")
+
+		err := r.ForwardIfReady(tc.qnames...)
+		if err != nil {
+			t.Errorf("(*RDB).ForwardIfReady(%v) = %v, want nil", tc.qnames, err)
+			continue
+		}
+
+		for qname, want := range tc.wantPending {
+			gotPending := h.GetPendingMessages(t, r.client, qname)
+			if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
+				t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.PendingKey(qname), diff)
+			}
+			// Make sure "pending_since" field is set
+			for _, msg := range gotPending {
+				pendingSince := r.client.HGet(ctx, base.TaskKey(msg.Queue, msg.ID), "pending_since").Val()
+				if want := strconv.Itoa(int(now.UnixNano())); pendingSince != want {
+					t.Error("pending_since field is not set for newly pending message")
+				}
+			}
+		}
+		for qname, want := range tc.wantScheduled {
+			gotScheduled := h.GetScheduledMessages(t, r.client, qname)
+			if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
+				t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.ScheduledKey(qname), diff)
+			}
+		}
+		for qname, want := range tc.wantRetry {
+			gotRetry := h.GetRetryMessages(t, r.client, qname)
+			if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" {
+				t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.RetryKey(qname), diff)
+			}
+		}
+		for qname, groups := range tc.wantGroup {
+			for groupKey, wantGroup := range groups {
+				gotGroup := h.GetGroupEntries(t, r.client, qname, groupKey)
+				if diff := cmp.Diff(wantGroup, gotGroup, h.SortZSetEntryOpt); diff != "" {
+					t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.GroupKey(qname, groupKey), diff)
+				}
+			}
+		}
+	}
+}
+
 func TestForwardIfReady(t *testing.T) {
 	r := setup(t)
 	defer r.Close()
@@ -2288,7 +2439,7 @@ func TestForwardIfReady(t *testing.T) {
 
 		err := r.ForwardIfReady(tc.qnames...)
 		if err != nil {
-			t.Errorf("(*RDB).CheckScheduled(%v) = %v, want nil", tc.qnames, err)
+			t.Errorf("(*RDB).ForwardIfReady(%v) = %v, want nil", tc.qnames, err)
 			continue
 		}
 