mirror of https://github.com/hibiken/asynq.git synced 2025-04-22 16:50:18 +08:00

use new context at call point instead of global context variable

This commit is contained in:
strobus 2021-08-19 15:29:35 -04:00
parent 0f32aca3fc
commit 626e626058
9 changed files with 190 additions and 194 deletions
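The change itself is mechanical: every file below drops its package-level ctx variable and builds a fresh context at each Redis call instead. A minimal sketch of the pattern, paraphrasing the rdb.go hunks further down (Ping is real asynq code; the comments are editorial):

// Before: one package-level context shared by every Redis call.
var ctx = context.Background()

func (r *RDB) Ping() error {
	return r.client.Ping(ctx).Err()
}

// After: the context is constructed at the call point, which keeps each call
// self-contained and makes it easier to later accept a caller-supplied context.
func (r *RDB) Ping() error {
	return r.client.Ping(context.Background()).Err()
}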

View File

@@ -16,8 +16,6 @@ import (
 	"github.com/hibiken/asynq/internal/base"
 )
 
-var ctx = context.Background()
-
 func TestClientEnqueueWithProcessAtOption(t *testing.T) {
 	r := setup(t)
 	client := NewClient(getRedisConnOpt(t))
@@ -754,7 +752,7 @@ func TestClientEnqueueUnique(t *testing.T) {
 			t.Fatal(err)
 		}
-		gotTTL := r.TTL(ctx, base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
+		gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
 		if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
 			t.Errorf("TTL = %v, want %v", gotTTL, tc.ttl)
 			continue
@@ -799,7 +797,7 @@ func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
 			t.Fatal(err)
 		}
-		gotTTL := r.TTL(ctx, base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
+		gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
 		wantTTL := time.Duration(tc.ttl.Seconds()+tc.d.Seconds()) * time.Second
 		if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
 			t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
@@ -845,7 +843,7 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
 			t.Fatal(err)
 		}
-		gotTTL := r.TTL(ctx, base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
+		gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
 		wantTTL := tc.at.Add(tc.ttl).Sub(time.Now())
 		if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
 			t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)

View File

@@ -5,6 +5,7 @@
 package asynq
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"math"
@@ -37,7 +38,7 @@ func TestInspectorQueues(t *testing.T) {
 	for _, tc := range tests {
 		h.FlushDB(t, r)
 		for _, qname := range tc.queues {
-			if err := r.SAdd(ctx, base.AllQueues, qname).Err(); err != nil {
+			if err := r.SAdd(context.Background(), base.AllQueues, qname).Err(); err != nil {
 				t.Fatalf("could not initialize all queue set: %v", err)
 			}
 		}
@@ -136,7 +137,7 @@ func TestInspectorDeleteQueue(t *testing.T) {
 				tc.qname, tc.force, err)
 			continue
 		}
-		if r.SIsMember(ctx, base.AllQueues, tc.qname).Val() {
+		if r.SIsMember(context.Background(), base.AllQueues, tc.qname).Val() {
 			t.Errorf("%q is a member of %q", tc.qname, base.AllQueues)
 		}
 	}
@@ -345,11 +346,11 @@ func TestInspectorGetQueueInfo(t *testing.T) {
 		h.SeedAllArchivedQueues(t, r, tc.archived)
 		for qname, n := range tc.processed {
 			processedKey := base.ProcessedKey(qname, now)
-			r.Set(ctx, processedKey, n, 0)
+			r.Set(context.Background(), processedKey, n, 0)
 		}
 		for qname, n := range tc.failed {
 			failedKey := base.FailedKey(qname, now)
-			r.Set(ctx, failedKey, n, 0)
+			r.Set(context.Background(), failedKey, n, 0)
 		}
 
 		got, err := inspector.GetQueueInfo(tc.qname)
@@ -385,14 +386,14 @@ func TestInspectorHistory(t *testing.T) {
 	for _, tc := range tests {
 		h.FlushDB(t, r)
 
-		r.SAdd(ctx, base.AllQueues, tc.qname)
+		r.SAdd(context.Background(), base.AllQueues, tc.qname)
 		// populate last n days data
 		for i := 0; i < tc.n; i++ {
 			ts := now.Add(-time.Duration(i) * 24 * time.Hour)
 			processedKey := base.ProcessedKey(tc.qname, ts)
 			failedKey := base.FailedKey(tc.qname, ts)
-			r.Set(ctx, processedKey, (i+1)*1000, 0)
-			r.Set(ctx, failedKey, (i+1)*10, 0)
+			r.Set(context.Background(), processedKey, (i+1)*1000, 0)
+			r.Set(context.Background(), failedKey, (i+1)*10, 0)
		}

		got, err := inspector.History(tc.qname, tc.n)
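The tests above drive asynq's public Inspector API. For orientation, a hedged sketch of non-test usage; NewInspector, Queues, GetQueueInfo, and the Size field follow the public API but are not part of this diff, and the Redis address is illustrative:

inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
queues, err := inspector.Queues() // every queue name known to asynq
if err != nil {
	log.Fatal(err)
}
for _, qname := range queues {
	info, err := inspector.GetQueueInfo(qname) // same call the tests exercise
	if err != nil {
		continue // queue may have been removed concurrently
	}
	log.Printf("queue=%s size=%d", qname, info.Size)
}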

View File

@@ -20,8 +20,6 @@ import (
 	"github.com/hibiken/asynq/internal/base"
 )
 
-var ctx = context.Background()
-
 // EquateInt64Approx returns a Comparer option that treats int64 values
 // to be equal if they are within the given margin.
 func EquateInt64Approx(margin int64) cmp.Option {
@@ -168,11 +166,11 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
 	tb.Helper()
 	switch r := r.(type) {
 	case *redis.Client:
-		if err := r.FlushDB(ctx).Err(); err != nil {
+		if err := r.FlushDB(context.Background()).Err(); err != nil {
 			tb.Fatal(err)
 		}
 	case *redis.ClusterClient:
-		err := r.ForEachMaster(ctx, func(ctx2 context.Context, c *redis.Client) error {
+		err := r.ForEachMaster(context.Background(), func(ctx2 context.Context, c *redis.Client) error {
 			if err := c.FlushAll(ctx2).Err(); err != nil {
 				return err
 			}
@@ -187,42 +185,42 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
 // SeedPendingQueue initializes the specified queue with the given messages.
 func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
 	tb.Helper()
-	r.SAdd(ctx, base.AllQueues, qname)
+	r.SAdd(context.Background(), base.AllQueues, qname)
 	seedRedisList(tb, r, base.PendingKey(qname), msgs, base.TaskStatePending)
 }
 
 // SeedActiveQueue initializes the active queue with the given messages.
 func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
 	tb.Helper()
-	r.SAdd(ctx, base.AllQueues, qname)
+	r.SAdd(context.Background(), base.AllQueues, qname)
 	seedRedisList(tb, r, base.ActiveKey(qname), msgs, base.TaskStateActive)
 }
 
 // SeedScheduledQueue initializes the scheduled queue with the given messages.
 func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
 	tb.Helper()
-	r.SAdd(ctx, base.AllQueues, qname)
+	r.SAdd(context.Background(), base.AllQueues, qname)
 	seedRedisZSet(tb, r, base.ScheduledKey(qname), entries, base.TaskStateScheduled)
 }
 
 // SeedRetryQueue initializes the retry queue with the given messages.
 func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
 	tb.Helper()
-	r.SAdd(ctx, base.AllQueues, qname)
+	r.SAdd(context.Background(), base.AllQueues, qname)
 	seedRedisZSet(tb, r, base.RetryKey(qname), entries, base.TaskStateRetry)
 }
 
 // SeedArchivedQueue initializes the archived queue with the given messages.
 func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
 	tb.Helper()
-	r.SAdd(ctx, base.AllQueues, qname)
+	r.SAdd(context.Background(), base.AllQueues, qname)
 	seedRedisZSet(tb, r, base.ArchivedKey(qname), entries, base.TaskStateArchived)
 }
 
 // SeedDeadlines initializes the deadlines set with the given entries.
 func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
 	tb.Helper()
-	r.SAdd(ctx, base.AllQueues, qname)
+	r.SAdd(context.Background(), base.AllQueues, qname)
 	seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries, base.TaskStateActive)
 }
@@ -281,7 +279,7 @@ func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
 	tb.Helper()
 	for _, msg := range msgs {
 		encoded := MustMarshal(tb, msg)
-		if err := c.LPush(ctx, key, msg.ID.String()).Err(); err != nil {
+		if err := c.LPush(context.Background(), key, msg.ID.String()).Err(); err != nil {
 			tb.Fatal(err)
 		}
 		key := base.TaskKey(msg.Queue, msg.ID.String())
@@ -292,11 +290,11 @@ func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
 			"deadline": msg.Deadline,
 			"unique_key": msg.UniqueKey,
 		}
-		if err := c.HSet(ctx, key, data).Err(); err != nil {
+		if err := c.HSet(context.Background(), key, data).Err(); err != nil {
 			tb.Fatal(err)
 		}
 		if len(msg.UniqueKey) > 0 {
-			err := c.SetNX(ctx, msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
+			err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
 			if err != nil {
 				tb.Fatalf("Failed to set unique lock in redis: %v", err)
 			}
@@ -311,7 +309,7 @@ func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
 		msg := item.Message
 		encoded := MustMarshal(tb, msg)
 		z := &redis.Z{Member: msg.ID.String(), Score: float64(item.Score)}
-		if err := c.ZAdd(ctx, key, z).Err(); err != nil {
+		if err := c.ZAdd(context.Background(), key, z).Err(); err != nil {
 			tb.Fatal(err)
 		}
 		key := base.TaskKey(msg.Queue, msg.ID.String())
@@ -322,11 +320,11 @@ func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
 			"deadline": msg.Deadline,
 			"unique_key": msg.UniqueKey,
 		}
-		if err := c.HSet(ctx, key, data).Err(); err != nil {
+		if err := c.HSet(context.Background(), key, data).Err(); err != nil {
 			tb.Fatal(err)
 		}
 		if len(msg.UniqueKey) > 0 {
-			err := c.SetNX(ctx, msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
+			err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
 			if err != nil {
 				tb.Fatalf("Failed to set unique lock in redis: %v", err)
 			}
@@ -401,13 +399,13 @@ func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) [
 func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
 	keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
 	tb.Helper()
-	ids := r.LRange(ctx, keyFn(qname), 0, -1).Val()
+	ids := r.LRange(context.Background(), keyFn(qname), 0, -1).Val()
 	var msgs []*base.TaskMessage
 	for _, id := range ids {
 		taskKey := base.TaskKey(qname, id)
-		data := r.HGet(ctx, taskKey, "msg").Val()
+		data := r.HGet(context.Background(), taskKey, "msg").Val()
 		msgs = append(msgs, MustUnmarshal(tb, data))
-		if gotState := r.HGet(ctx, taskKey, "state").Val(); gotState != state.String() {
+		if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
 			tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
 		}
 	}
@@ -418,13 +416,13 @@ func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
 func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
 	keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
 	tb.Helper()
-	ids := r.ZRange(ctx, keyFn(qname), 0, -1).Val()
+	ids := r.ZRange(context.Background(), keyFn(qname), 0, -1).Val()
 	var msgs []*base.TaskMessage
 	for _, id := range ids {
 		taskKey := base.TaskKey(qname, id)
-		msg := r.HGet(ctx, taskKey, "msg").Val()
+		msg := r.HGet(context.Background(), taskKey, "msg").Val()
 		msgs = append(msgs, MustUnmarshal(tb, msg))
-		if gotState := r.HGet(ctx, taskKey, "state").Val(); gotState != state.String() {
+		if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
 			tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
 		}
 	}
@@ -435,14 +433,14 @@ func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
 func getMessagesFromZSetWithScores(tb testing.TB, r redis.UniversalClient,
 	qname string, keyFn func(qname string) string, state base.TaskState) []base.Z {
 	tb.Helper()
-	zs := r.ZRangeWithScores(ctx, keyFn(qname), 0, -1).Val()
+	zs := r.ZRangeWithScores(context.Background(), keyFn(qname), 0, -1).Val()
 	var res []base.Z
 	for _, z := range zs {
 		taskID := z.Member.(string)
 		taskKey := base.TaskKey(qname, taskID)
-		msg := r.HGet(ctx, taskKey, "msg").Val()
+		msg := r.HGet(context.Background(), taskKey, "msg").Val()
 		res = append(res, base.Z{Message: MustUnmarshal(tb, msg), Score: int64(z.Score)})
-		if gotState := r.HGet(ctx, taskKey, "state").Val(); gotState != state.String() {
+		if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
 			tb.Errorf("task (id=%q) is in %q state, want %v", taskID, gotState, state)
 		}
 	}
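These asynqtest helpers are what the package's tests lean on to arrange Redis state before exercising code. A hedged sketch of typical usage inside a test; the TaskMessage fields, Redis address, and database number are illustrative, not taken from this diff:

import (
	"testing"

	"github.com/go-redis/redis/v8"
	"github.com/google/uuid"
	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
)

func TestSomethingPending(t *testing.T) {
	r := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 14})
	h.FlushDB(t, r) // start every case from a clean database
	msg := &base.TaskMessage{ID: uuid.New(), Type: "email:send", Queue: "default"}
	h.SeedPendingQueue(t, r, []*base.TaskMessage{msg}, "default") // task is now pending in "default"
	// ... run the code under test, then assert on the resulting Redis state ...
}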

View File

@@ -5,6 +5,7 @@
 package rdb
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"time"
@@ -18,7 +19,7 @@ import (
 
 // AllQueues returns a list of all queue names.
 func (r *RDB) AllQueues() ([]string, error) {
-	return r.client.SMembers(ctx, base.AllQueues).Result()
+	return r.client.SMembers(context.Background(), base.AllQueues).Result()
 }
 
 // Stats represents a state of queues at a certain time.
@@ -102,7 +103,7 @@ return res`)
 // CurrentStats returns a current state of the queues.
 func (r *RDB) CurrentStats(qname string) (*Stats, error) {
 	var op errors.Op = "rdb.CurrentStats"
-	exists, err := r.client.SIsMember(ctx, base.AllQueues, qname).Result()
+	exists, err := r.client.SIsMember(context.Background(), base.AllQueues, qname).Result()
 	if err != nil {
 		return nil, errors.E(op, errors.Unknown, err)
 	}
@@ -110,7 +111,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
 		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
 	}
 	now := time.Now()
-	res, err := currentStatsCmd.Run(ctx, r.client, []string{
+	res, err := currentStatsCmd.Run(context.Background(), r.client, []string{
 		base.PendingKey(qname),
 		base.ActiveKey(qname),
 		base.ScheduledKey(qname),
@@ -181,7 +182,7 @@ func (r *RDB) memoryUsage(qname string) (int64, error) {
 		err    error
 	)
 	for {
-		data, cursor, err = r.client.Scan(ctx, cursor, fmt.Sprintf("asynq:{%s}*", qname), 100).Result()
+		data, cursor, err = r.client.Scan(context.Background(), cursor, fmt.Sprintf("asynq:{%s}*", qname), 100).Result()
 		if err != nil {
 			return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "scan", Err: err})
 		}
@@ -192,7 +193,7 @@ func (r *RDB) memoryUsage(qname string) (int64, error) {
 	}
 	var usg int64
 	for _, k := range keys {
-		n, err := r.client.MemoryUsage(ctx, k).Result()
+		n, err := r.client.MemoryUsage(context.Background(), k).Result()
 		if err != nil {
 			return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "memory usage", Err: err})
 		}
@@ -218,7 +219,7 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
 	if n < 1 {
 		return nil, errors.E(op, errors.FailedPrecondition, "the number of days must be positive")
 	}
-	exists, err := r.client.SIsMember(ctx, base.AllQueues, qname).Result()
+	exists, err := r.client.SIsMember(context.Background(), base.AllQueues, qname).Result()
 	if err != nil {
 		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
 	}
@@ -235,7 +236,7 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
 		keys = append(keys, base.ProcessedKey(qname, ts))
 		keys = append(keys, base.FailedKey(qname, ts))
 	}
-	res, err := historicalStatsCmd.Run(ctx, r.client, keys).Result()
+	res, err := historicalStatsCmd.Run(context.Background(), r.client, keys).Result()
 	if err != nil {
 		return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
 	}
@@ -257,7 +258,7 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
 
 // RedisInfo returns a map of redis info.
 func (r *RDB) RedisInfo() (map[string]string, error) {
-	res, err := r.client.Info(ctx).Result()
+	res, err := r.client.Info(context.Background()).Result()
 	if err != nil {
 		return nil, err
 	}
@@ -266,7 +267,7 @@ func (r *RDB) RedisInfo() (map[string]string, error) {
 
 // RedisClusterInfo returns a map of redis cluster info.
 func (r *RDB) RedisClusterInfo() (map[string]string, error) {
-	res, err := r.client.ClusterInfo(ctx).Result()
+	res, err := r.client.ClusterInfo(context.Background()).Result()
 	if err != nil {
 		return nil, err
 	}
@@ -295,7 +296,7 @@ func reverse(x []string) {
 // checkQueueExists verifies whether the queue exists.
 // It returns QueueNotFoundError if queue doesn't exist.
 func (r *RDB) checkQueueExists(qname string) error {
-	exists, err := r.client.SIsMember(ctx, base.AllQueues, qname).Result()
+	exists, err := r.client.SIsMember(context.Background(), base.AllQueues, qname).Result()
 	if err != nil {
 		return errors.E(errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
 	}
@@ -344,7 +345,7 @@ func (r *RDB) GetTaskInfo(qname string, id uuid.UUID) (*base.TaskInfo, error) {
 		time.Now().Unix(),
 		base.QueueKeyPrefix(qname),
 	}
-	res, err := getTaskInfoCmd.Run(ctx, r.client, keys, argv...).Result()
+	res, err := getTaskInfoCmd.Run(context.Background(), r.client, keys, argv...).Result()
 	if err != nil {
 		if err.Error() == "NOT FOUND" {
 			return nil, errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id.String()})
@@ -410,7 +411,7 @@ func (p Pagination) stop() int64 {
 // ListPending returns pending tasks that are ready to be processed.
 func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, error) {
 	var op errors.Op = "rdb.ListPending"
-	if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
+	if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() {
 		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
 	}
 	res, err := r.listMessages(base.PendingKey(qname), qname, pgn)
@@ -423,7 +424,7 @@ func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, er
 // ListActive returns all tasks that are currently being processed for the given queue.
 func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskMessage, error) {
 	var op errors.Op = "rdb.ListActive"
-	if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
+	if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() {
 		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
 	}
 	res, err := r.listMessages(base.ActiveKey(qname), qname, pgn)
@@ -453,7 +454,7 @@ func (r *RDB) listMessages(key, qname string, pgn Pagination) ([]*base.TaskMessa
 	// correct range and reverse the list to get the tasks with pagination.
 	stop := -pgn.start() - 1
 	start := -pgn.stop() - 1
-	res, err := listMessagesCmd.Run(ctx, r.client,
+	res, err := listMessagesCmd.Run(context.Background(), r.client,
 		[]string{key}, start, stop, base.TaskKeyPrefix(qname)).Result()
 	if err != nil {
 		return nil, errors.E(errors.Unknown, err)
@@ -479,7 +480,7 @@ func (r *RDB) listMessages(key, qname string, pgn Pagination) ([]*base.TaskMessa
 // to be processed in the future.
 func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]base.Z, error) {
 	var op errors.Op = "rdb.ListScheduled"
-	if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
+	if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() {
 		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
 	}
 	res, err := r.listZSetEntries(base.ScheduledKey(qname), qname, pgn)
@@ -493,7 +494,7 @@ func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]base.Z, error) {
 // and willl be retried in the future.
 func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) {
 	var op errors.Op = "rdb.ListRetry"
-	if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
+	if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() {
 		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
 	}
 	res, err := r.listZSetEntries(base.RetryKey(qname), qname, pgn)
@@ -506,7 +507,7 @@ func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) {
 // ListArchived returns all tasks from the given queue that have exhausted its retry limit.
 func (r *RDB) ListArchived(qname string, pgn Pagination) ([]base.Z, error) {
 	var op errors.Op = "rdb.ListArchived"
-	if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
+	if !r.client.SIsMember(context.Background(), base.AllQueues, qname).Val() {
 		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
 	}
 	zs, err := r.listZSetEntries(base.ArchivedKey(qname), qname, pgn)
@@ -537,7 +538,7 @@ return res
 // listZSetEntries returns a list of message and score pairs in Redis sorted-set
 // with the given key.
 func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, error) {
-	res, err := listZSetEntriesCmd.Run(ctx, r.client, []string{key},
+	res, err := listZSetEntriesCmd.Run(context.Background(), r.client, []string{key},
 		pgn.start(), pgn.stop(), base.TaskKeyPrefix(qname)).Result()
 	if err != nil {
 		return nil, errors.E(errors.Unknown, err)
@@ -664,7 +665,7 @@ func (r *RDB) RunTask(qname string, id uuid.UUID) error {
 		id.String(),
 		base.QueueKeyPrefix(qname),
 	}
-	res, err := runTaskCmd.Run(ctx, r.client, keys, argv...).Result()
+	res, err := runTaskCmd.Run(context.Background(), r.client, keys, argv...).Result()
 	if err != nil {
 		return errors.E(op, errors.Unknown, err)
 	}
@@ -717,7 +718,7 @@ func (r *RDB) runAll(zset, qname string) (int64, error) {
 	argv := []interface{}{
 		base.TaskKeyPrefix(qname),
 	}
-	res, err := runAllCmd.Run(ctx, r.client, keys, argv...).Result()
+	res, err := runAllCmd.Run(context.Background(), r.client, keys, argv...).Result()
 	if err != nil {
 		return 0, err
 	}
@@ -805,7 +806,7 @@ func (r *RDB) ArchiveAllPendingTasks(qname string) (int64, error) {
 		maxArchiveSize,
 		base.TaskKeyPrefix(qname),
 	}
-	res, err := archiveAllPendingCmd.Run(ctx, r.client, keys, argv...).Result()
+	res, err := archiveAllPendingCmd.Run(context.Background(), r.client, keys, argv...).Result()
 	if err != nil {
 		return 0, errors.E(op, errors.Internal, err)
 	}
@@ -886,7 +887,7 @@ func (r *RDB) ArchiveTask(qname string, id uuid.UUID) error {
 		maxArchiveSize,
 		base.QueueKeyPrefix(qname),
 	}
-	res, err := archiveTaskCmd.Run(ctx, r.client, keys, argv...).Result()
+	res, err := archiveTaskCmd.Run(context.Background(), r.client, keys, argv...).Result()
 	if err != nil {
 		return errors.E(op, errors.Unknown, err)
 	}
@@ -951,7 +952,7 @@ func (r *RDB) archiveAll(src, dst, qname string) (int64, error) {
 		base.TaskKeyPrefix(qname),
 		qname,
 	}
-	res, err := archiveAllCmd.Run(ctx, r.client, keys, argv...).Result()
+	res, err := archiveAllCmd.Run(context.Background(), r.client, keys, argv...).Result()
 	if err != nil {
 		return 0, err
 	}
@@ -1018,7 +1019,7 @@ func (r *RDB) DeleteTask(qname string, id uuid.UUID) error {
 		id.String(),
 		base.QueueKeyPrefix(qname),
 	}
-	res, err := deleteTaskCmd.Run(ctx, r.client, keys, argv...).Result()
+	res, err := deleteTaskCmd.Run(context.Background(), r.client, keys, argv...).Result()
 	if err != nil {
 		return errors.E(op, errors.Unknown, err)
 	}
@@ -1110,7 +1111,7 @@ func (r *RDB) deleteAll(key, qname string) (int64, error) {
 		base.TaskKeyPrefix(qname),
 		qname,
 	}
-	res, err := deleteAllCmd.Run(ctx, r.client, []string{key}, argv...).Result()
+	res, err := deleteAllCmd.Run(context.Background(), r.client, []string{key}, argv...).Result()
 	if err != nil {
 		return 0, err
 	}
@@ -1151,7 +1152,7 @@ func (r *RDB) DeleteAllPendingTasks(qname string) (int64, error) {
 	argv := []interface{}{
 		base.TaskKeyPrefix(qname),
 	}
-	res, err := deleteAllPendingCmd.Run(ctx, r.client, keys, argv...).Result()
+	res, err := deleteAllPendingCmd.Run(context.Background(), r.client, keys, argv...).Result()
 	if err != nil {
 		return 0, errors.E(op, errors.Unknown, err)
 	}
@@ -1282,7 +1283,7 @@ return 1`)
 // the queue is empty.
 func (r *RDB) RemoveQueue(qname string, force bool) error {
 	var op errors.Op = "rdb.RemoveQueue"
-	exists, err := r.client.SIsMember(ctx, base.AllQueues, qname).Result()
+	exists, err := r.client.SIsMember(context.Background(), base.AllQueues, qname).Result()
 	if err != nil {
 		return err
 	}
@@ -1303,7 +1304,7 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
 		base.ArchivedKey(qname),
 		base.DeadlinesKey(qname),
 	}
-	res, err := script.Run(ctx, r.client, keys, base.TaskKeyPrefix(qname)).Result()
+	res, err := script.Run(context.Background(), r.client, keys, base.TaskKeyPrefix(qname)).Result()
 	if err != nil {
 		return errors.E(op, errors.Unknown, err)
 	}
@@ -1313,7 +1314,7 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
 	}
 	switch n {
 	case 1:
-		if err := r.client.SRem(ctx, base.AllQueues, qname).Err(); err != nil {
+		if err := r.client.SRem(context.Background(), base.AllQueues, qname).Err(); err != nil {
 			return errors.E(op, errors.Unknown, err)
 		}
 		return nil
@@ -1336,7 +1337,7 @@ return keys`)
 // ListServers returns the list of server info.
 func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
 	now := time.Now()
-	res, err := listServerKeysCmd.Run(ctx, r.client, []string{base.AllServers}, now.Unix()).Result()
+	res, err := listServerKeysCmd.Run(context.Background(), r.client, []string{base.AllServers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err
 	}
@@ -1346,7 +1347,7 @@ func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
 	}
 	var servers []*base.ServerInfo
 	for _, key := range keys {
-		data, err := r.client.Get(ctx, key).Result()
+		data, err := r.client.Get(context.Background(), key).Result()
 		if err != nil {
 			continue // skip bad data
 		}
@@ -1370,7 +1371,7 @@ return keys`)
 func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
 	var op errors.Op = "rdb.ListWorkers"
 	now := time.Now()
-	res, err := listWorkersCmd.Run(ctx, r.client, []string{base.AllWorkers}, now.Unix()).Result()
+	res, err := listWorkersCmd.Run(context.Background(), r.client, []string{base.AllWorkers}, now.Unix()).Result()
 	if err != nil {
 		return nil, errors.E(op, errors.Unknown, err)
 	}
@@ -1380,7 +1381,7 @@ func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
 	}
 	var workers []*base.WorkerInfo
 	for _, key := range keys {
-		data, err := r.client.HVals(ctx, key).Result()
+		data, err := r.client.HVals(context.Background(), key).Result()
 		if err != nil {
 			continue // skip bad data
 		}
@@ -1405,7 +1406,7 @@ return keys`)
 // ListSchedulerEntries returns the list of scheduler entries.
 func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
 	now := time.Now()
-	res, err := listSchedulerKeysCmd.Run(ctx, r.client, []string{base.AllSchedulers}, now.Unix()).Result()
+	res, err := listSchedulerKeysCmd.Run(context.Background(), r.client, []string{base.AllSchedulers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err
 	}
@@ -1415,7 +1416,7 @@ func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
 	}
 	var entries []*base.SchedulerEntry
 	for _, key := range keys {
-		data, err := r.client.LRange(ctx, key, 0, -1).Result()
+		data, err := r.client.LRange(context.Background(), key, 0, -1).Result()
 		if err != nil {
 			continue // skip bad data
 		}
@@ -1433,7 +1434,7 @@ func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
 // ListSchedulerEnqueueEvents returns the list of scheduler enqueue events.
 func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*base.SchedulerEnqueueEvent, error) {
 	key := base.SchedulerHistoryKey(entryID)
-	zs, err := r.client.ZRevRangeWithScores(ctx, key, pgn.start(), pgn.stop()).Result()
+	zs, err := r.client.ZRevRangeWithScores(context.Background(), key, pgn.start(), pgn.stop()).Result()
 	if err != nil {
 		return nil, err
 	}
@@ -1455,7 +1456,7 @@ func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*bas
 // Pause pauses processing of tasks from the given queue.
 func (r *RDB) Pause(qname string) error {
 	key := base.PausedKey(qname)
-	ok, err := r.client.SetNX(ctx, key, time.Now().Unix(), 0).Result()
+	ok, err := r.client.SetNX(context.Background(), key, time.Now().Unix(), 0).Result()
 	if err != nil {
 		return err
 	}
@@ -1468,7 +1469,7 @@ func (r *RDB) Pause(qname string) error {
 // Unpause resumes processing of tasks from the given queue.
 func (r *RDB) Unpause(qname string) error {
 	key := base.PausedKey(qname)
-	deleted, err := r.client.Del(ctx, key).Result()
+	deleted, err := r.client.Del(context.Background(), key).Result()
 	if err != nil {
 		return err
 	}
@@ -1481,7 +1482,7 @@ func (r *RDB) Unpause(qname string) error {
 // ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
 func (r *RDB) ClusterKeySlot(qname string) (int64, error) {
 	key := base.PendingKey(qname)
-	return r.client.ClusterKeySlot(ctx, key).Result()
+	return r.client.ClusterKeySlot(context.Background(), key).Result()
 }
 
 // ClusterNodes returns a list of nodes the given queue belongs to.
@@ -1490,7 +1491,7 @@ func (r *RDB) ClusterNodes(qname string) ([]redis.ClusterNode, error) {
 	if err != nil {
 		return nil, err
 	}
-	clusterSlots, err := r.client.ClusterSlots(ctx).Result()
+	clusterSlots, err := r.client.ClusterSlots(context.Background()).Result()
 	if err != nil {
 		return nil, err
 	}
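For orientation, a hedged sketch of how a caller might drive the inspect API above; NewRDB, the Redis address, and the Stats field names are assumptions based on the rest of the package, not shown in this diff:

client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
r := rdb.NewRDB(client) // assumed constructor from this package
qnames, err := r.AllQueues()
if err != nil {
	log.Fatal(err)
}
for _, qname := range qnames {
	stats, err := r.CurrentStats(qname)
	if err != nil {
		continue // queue may have been removed since AllQueues
	}
	log.Printf("queue=%s pending=%d failed=%d", qname, stats.Pending, stats.Failed) // field names assumed
}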

View File

@@ -5,6 +5,7 @@
 package rdb
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"testing"
@@ -34,7 +35,7 @@ func TestAllQueues(t *testing.T) {
 	for _, tc := range tests {
 		h.FlushDB(t, r.client)
 		for _, qname := range tc.queues {
-			if err := r.client.SAdd(ctx, base.AllQueues, qname).Err(); err != nil {
+			if err := r.client.SAdd(context.Background(), base.AllQueues, qname).Err(); err != nil {
 				t.Fatalf("could not initialize all queue set: %v", err)
 			}
 		}
@@ -198,11 +199,11 @@ func TestCurrentStats(t *testing.T) {
 		h.SeedAllArchivedQueues(t, r.client, tc.archived)
 		for qname, n := range tc.processed {
 			processedKey := base.ProcessedKey(qname, now)
-			r.client.Set(ctx, processedKey, n, 0)
+			r.client.Set(context.Background(), processedKey, n, 0)
 		}
 		for qname, n := range tc.failed {
 			failedKey := base.FailedKey(qname, now)
-			r.client.Set(ctx, failedKey, n, 0)
+			r.client.Set(context.Background(), failedKey, n, 0)
 		}
 
 		got, err := r.CurrentStats(tc.qname)
@@ -247,14 +248,14 @@ func TestHistoricalStats(t *testing.T) {
 	for _, tc := range tests {
 		h.FlushDB(t, r.client)
 
-		r.client.SAdd(ctx, base.AllQueues, tc.qname)
+		r.client.SAdd(context.Background(), base.AllQueues, tc.qname)
 		// populate last n days data
 		for i := 0; i < tc.n; i++ {
 			ts := now.Add(-time.Duration(i) * 24 * time.Hour)
 			processedKey := base.ProcessedKey(tc.qname, ts)
 			failedKey := base.FailedKey(tc.qname, ts)
-			r.client.Set(ctx, processedKey, (i+1)*1000, 0)
-			r.client.Set(ctx, failedKey, (i+1)*10, 0)
		}

		got, err := r.HistoricalStats(tc.qname, tc.n)
@@ -3168,7 +3169,7 @@ func TestDeleteTaskWithUniqueLock(t *testing.T) {
 			}
 		}
 
-		if r.client.Exists(ctx, tc.uniqueKey).Val() != 0 {
+		if r.client.Exists(context.Background(), tc.uniqueKey).Val() != 0 {
 			t.Errorf("Uniqueness lock %q still exists", tc.uniqueKey)
 		}
 	}
@@ -3401,7 +3402,7 @@ func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) {
 		}
 
 		for _, uniqueKey := range tc.uniqueKeys {
-			if r.client.Exists(ctx, uniqueKey).Val() != 0 {
+			if r.client.Exists(context.Background(), uniqueKey).Val() != 0 {
 				t.Errorf("Uniqueness lock %q still exists", uniqueKey)
 			}
 		}
@@ -3702,7 +3703,7 @@ func TestRemoveQueue(t *testing.T) {
 				tc.qname, tc.force, err)
 			continue
 		}
-		if r.client.SIsMember(ctx, base.AllQueues, tc.qname).Val() {
+		if r.client.SIsMember(context.Background(), base.AllQueues, tc.qname).Val() {
 			t.Errorf("%q is a member of %q", tc.qname, base.AllQueues)
 		}
 
@@ -3715,12 +3716,12 @@ func TestRemoveQueue(t *testing.T) {
 			base.ArchivedKey(tc.qname),
 		}
 		for _, key := range keys {
-			if r.client.Exists(ctx, key).Val() != 0 {
+			if r.client.Exists(context.Background(), key).Val() != 0 {
 				t.Errorf("key %q still exists", key)
 			}
 		}
 
-		if n := len(r.client.Keys(ctx, base.TaskKeyPrefix(tc.qname) + "*").Val()); n != 0 {
+		if n := len(r.client.Keys(context.Background(), base.TaskKeyPrefix(tc.qname) + "*").Val()); n != 0 {
 			t.Errorf("%d keys still exists for tasks", n)
 		}
 	}
@@ -4137,7 +4138,7 @@ func TestRecordSchedulerEnqueueEventTrimsDataSet(t *testing.T) {
 	}
 
 	// Make sure the set is full.
-	if n := r.client.ZCard(ctx, key).Val(); n != maxEvents {
+	if n := r.client.ZCard(context.Background(), key).Val(); n != maxEvents {
 		t.Fatalf("unexpected number of events; got %d, want %d", n, maxEvents)
 	}
@@ -4149,7 +4150,7 @@ func TestRecordSchedulerEnqueueEventTrimsDataSet(t *testing.T) {
 	if err := r.RecordSchedulerEnqueueEvent(entryID, &event); err != nil {
 		t.Fatalf("RecordSchedulerEnqueueEvent failed: %v", err)
 	}
-	if n := r.client.ZCard(ctx, key).Val(); n != maxEvents {
+	if n := r.client.ZCard(context.Background(), key).Val(); n != maxEvents {
 		t.Fatalf("unexpected number of events; got %d, want %d", n, maxEvents)
 	}
 	events, err := r.ListSchedulerEnqueueEvents(entryID, Pagination{Size: maxEvents})
@@ -4182,7 +4183,7 @@ func TestPause(t *testing.T) {
 			t.Errorf("Pause(%q) returned error: %v", tc.qname, err)
 		}
 		key := base.PausedKey(tc.qname)
-		if r.client.Exists(ctx, key).Val() == 0 {
+		if r.client.Exists(context.Background(), key).Val() == 0 {
 			t.Errorf("key %q does not exist", key)
 		}
 	}
@@ -4237,7 +4238,7 @@ func TestUnpause(t *testing.T) {
 			t.Errorf("Unpause(%q) returned error: %v", tc.qname, err)
 		}
 		key := base.PausedKey(tc.qname)
-		if r.client.Exists(ctx, key).Val() == 1 {
+		if r.client.Exists(context.Background(), key).Val() == 1 {
 			t.Errorf("key %q exists", key)
 		}
 	}

View File

@ -18,8 +18,6 @@ import (
const statsTTL = 90 * 24 * time.Hour // 90 days const statsTTL = 90 * 24 * time.Hour // 90 days
var ctx = context.Background()
// RDB is a client interface to query and mutate task queues. // RDB is a client interface to query and mutate task queues.
type RDB struct { type RDB struct {
client redis.UniversalClient client redis.UniversalClient
@ -42,11 +40,11 @@ func (r *RDB) Client() redis.UniversalClient {
// Ping checks the connection with redis server. // Ping checks the connection with redis server.
func (r *RDB) Ping() error { func (r *RDB) Ping() error {
return r.client.Ping(ctx).Err() return r.client.Ping(context.Background()).Err()
} }
func (r *RDB) runScript(op errors.Op, script *redis.Script, keys []string, args ...interface{}) error { func (r *RDB) runScript(op errors.Op, script *redis.Script, keys []string, args ...interface{}) error {
if err := script.Run(ctx, r.client, keys, args...).Err(); err != nil { if err := script.Run(context.Background(), r.client, keys, args...).Err(); err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err)) return errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err))
} }
return nil return nil
@ -82,7 +80,7 @@ func (r *RDB) Enqueue(msg *base.TaskMessage) error {
if err != nil { if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err)) return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
} }
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil { if err := r.client.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
keys := []string{ keys := []string{
@ -136,7 +134,7 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
if err != nil { if err != nil {
return errors.E(op, errors.Internal, "cannot encode task message: %v", err) return errors.E(op, errors.Internal, "cannot encode task message: %v", err)
} }
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil { if err := r.client.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
keys := []string{ keys := []string{
@ -151,7 +149,7 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
msg.Timeout, msg.Timeout,
msg.Deadline, msg.Deadline,
} }
res, err := enqueueUniqueCmd.Run(ctx, r.client, keys, argv...).Result() res, err := enqueueUniqueCmd.Run(context.Background(), r.client, keys, argv...).Result()
if err != nil { if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err)) return errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
} }
@ -226,7 +224,7 @@ func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Ti
time.Now().Unix(), time.Now().Unix(),
base.TaskKeyPrefix(qname), base.TaskKeyPrefix(qname),
} }
res, err := dequeueCmd.Run(ctx, r.client, keys, argv...).Result() res, err := dequeueCmd.Run(context.Background(), r.client, keys, argv...).Result()
if err == redis.Nil { if err == redis.Nil {
continue continue
} else if err != nil { } else if err != nil {
@ -381,7 +379,7 @@ func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
if err != nil { if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err)) return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
} }
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil { if err := r.client.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
keys := []string{ keys := []string{
@ -430,7 +428,7 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
if err != nil { if err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode task message: %v", err)) return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode task message: %v", err))
} }
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil { if err := r.client.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
keys := []string{ keys := []string{
@ -446,7 +444,7 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
msg.Timeout, msg.Timeout,
msg.Deadline, msg.Deadline,
} }
res, err := scheduleUniqueCmd.Run(ctx, r.client, keys, argv...).Result() res, err := scheduleUniqueCmd.Run(context.Background(), r.client, keys, argv...).Result()
if err != nil { if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err)) return errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
} }
@ -621,7 +619,7 @@ return table.getn(ids)`)
// from the src zset to the dst list. It returns the number of tasks moved. // from the src zset to the dst list. It returns the number of tasks moved.
func (r *RDB) forward(src, dst, taskKeyPrefix string) (int, error) { func (r *RDB) forward(src, dst, taskKeyPrefix string) (int, error) {
now := float64(time.Now().Unix()) now := float64(time.Now().Unix())
res, err := forwardCmd.Run(ctx, r.client, []string{src, dst}, now, taskKeyPrefix).Result() res, err := forwardCmd.Run(context.Background(), r.client, []string{src, dst}, now, taskKeyPrefix).Result()
if err != nil { if err != nil {
return 0, errors.E(errors.Internal, fmt.Sprintf("redis eval error: %v", err)) return 0, errors.E(errors.Internal, fmt.Sprintf("redis eval error: %v", err))
} }
@ -668,7 +666,7 @@ func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*bas
var op errors.Op = "rdb.ListDeadlineExceeded" var op errors.Op = "rdb.ListDeadlineExceeded"
var msgs []*base.TaskMessage var msgs []*base.TaskMessage
for _, qname := range qnames { for _, qname := range qnames {
res, err := listDeadlineExceededCmd.Run(ctx, r.client, res, err := listDeadlineExceededCmd.Run(context.Background(), r.client,
[]string{base.DeadlinesKey(qname)}, []string{base.DeadlinesKey(qname)},
deadline.Unix(), base.TaskKeyPrefix(qname)).Result() deadline.Unix(), base.TaskKeyPrefix(qname)).Result()
if err != nil { if err != nil {
@ -723,10 +721,10 @@ func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo
} }
skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID) skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
wkey := base.WorkersKey(info.Host, info.PID, info.ServerID) wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
if err := r.client.ZAdd(ctx, base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil { if err := r.client.ZAdd(context.Background(), base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
if err := r.client.ZAdd(ctx, base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil { if err := r.client.ZAdd(context.Background(), base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err})
} }
return r.runScript(op, writeServerStateCmd, []string{skey, wkey}, args...) return r.runScript(op, writeServerStateCmd, []string{skey, wkey}, args...)
@ -744,10 +742,10 @@ func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
var op errors.Op = "rdb.ClearServerState" var op errors.Op = "rdb.ClearServerState"
skey := base.ServerInfoKey(host, pid, serverID) skey := base.ServerInfoKey(host, pid, serverID)
wkey := base.WorkersKey(host, pid, serverID) wkey := base.WorkersKey(host, pid, serverID)
if err := r.client.ZRem(ctx, base.AllServers, skey).Err(); err != nil { if err := r.client.ZRem(context.Background(), base.AllServers, skey).Err(); err != nil {
return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err}) return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err})
} }
if err := r.client.ZRem(ctx, base.AllWorkers, wkey).Err(); err != nil { if err := r.client.ZRem(context.Background(), base.AllWorkers, wkey).Err(); err != nil {
return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err}) return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err})
} }
return r.runScript(op, clearServerStateCmd, []string{skey, wkey}) return r.runScript(op, clearServerStateCmd, []string{skey, wkey})
@ -777,7 +775,7 @@ func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.Schedule
} }
exp := time.Now().Add(ttl).UTC() exp := time.Now().Add(ttl).UTC()
key := base.SchedulerEntriesKey(schedulerID) key := base.SchedulerEntriesKey(schedulerID)
err := r.client.ZAdd(ctx, base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err() err := r.client.ZAdd(context.Background(), base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
if err != nil { if err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err})
} }
@ -788,10 +786,10 @@ func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.Schedule
func (r *RDB) ClearSchedulerEntries(scheduelrID string) error { func (r *RDB) ClearSchedulerEntries(scheduelrID string) error {
var op errors.Op = "rdb.ClearSchedulerEntries" var op errors.Op = "rdb.ClearSchedulerEntries"
key := base.SchedulerEntriesKey(scheduelrID) key := base.SchedulerEntriesKey(scheduelrID)
if err := r.client.ZRem(ctx, base.AllSchedulers, key).Err(); err != nil { if err := r.client.ZRem(context.Background(), base.AllSchedulers, key).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zrem", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zrem", Err: err})
} }
if err := r.client.Del(ctx, key).Err(); err != nil { if err := r.client.Del(context.Background(), key).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err})
} }
return nil return nil
@ -800,8 +798,8 @@ func (r *RDB) ClearSchedulerEntries(scheduelrID string) error {
// CancelationPubSub returns a pubsub for cancelation messages. // CancelationPubSub returns a pubsub for cancelation messages.
func (r *RDB) CancelationPubSub() (*redis.PubSub, error) { func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
var op errors.Op = "rdb.CancelationPubSub" var op errors.Op = "rdb.CancelationPubSub"
pubsub := r.client.Subscribe(ctx, base.CancelChannel) pubsub := r.client.Subscribe(context.Background(), base.CancelChannel)
_, err := pubsub.Receive(ctx) _, err := pubsub.Receive(context.Background())
if err != nil { if err != nil {
return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub receive error: %v", err)) return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub receive error: %v", err))
} }
@ -812,7 +810,7 @@ func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
// The message is the ID for the task to be canceled. // The message is the ID for the task to be canceled.
func (r *RDB) PublishCancelation(id string) error { func (r *RDB) PublishCancelation(id string) error {
var op errors.Op = "rdb.PublishCancelation" var op errors.Op = "rdb.PublishCancelation"
if err := r.client.Publish(ctx, base.CancelChannel, id).Err(); err != nil { if err := r.client.Publish(context.Background(), base.CancelChannel, id).Err(); err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub publish error: %v", err)) return errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub publish error: %v", err))
} }
return nil return nil
@ -852,7 +850,7 @@ func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerE
func (r *RDB) ClearSchedulerHistory(entryID string) error { func (r *RDB) ClearSchedulerHistory(entryID string) error {
var op errors.Op = "rdb.ClearSchedulerHistory" var op errors.Op = "rdb.ClearSchedulerHistory"
key := base.SchedulerHistoryKey(entryID) key := base.SchedulerHistoryKey(entryID)
if err := r.client.Del(ctx, key).Err(); err != nil { if err := r.client.Del(context.Background(), key).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err})
} }
return nil return nil
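Every hunk in this file is the same mechanical substitution: the package-level var ctx = context.Background() is removed and each go-redis call builds its context at the call point. A minimal sketch of the resulting shape, assuming go-redis v8 and a local Redis at localhost:6379 (the client, key, and value names are illustrative only and not part of this commit):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	// Previously a single package-level context was shared by every call:
	//   var ctx = context.Background()
	// After this change, each call constructs its own context inline.
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	if err := client.Set(context.Background(), "example:key", "hello", 0).Err(); err != nil {
		fmt.Println("SET failed:", err)
		return
	}
	val, err := client.Get(context.Background(), "example:key").Result()
	if err != nil {
		fmt.Println("GET failed:", err)
		return
	}
	fmt.Println("example:key =", val)
}

Behavior is unchanged for now, but dropping the shared variable means a caller can later substitute a request-scoped or cancelable context at any single call site without touching global state.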

View File

@ -5,6 +5,7 @@
package rdb package rdb
import ( import (
"context"
"encoding/json" "encoding/json"
"flag" "flag"
"strconv" "strconv"
@ -85,7 +86,7 @@ func TestEnqueue(t *testing.T) {
// Check Pending list has task ID. // Check Pending list has task ID.
pendingKey := base.PendingKey(tc.msg.Queue) pendingKey := base.PendingKey(tc.msg.Queue)
pendingIDs := r.client.LRange(ctx, pendingKey, 0, -1).Val() pendingIDs := r.client.LRange(context.Background(), pendingKey, 0, -1).Val()
if n := len(pendingIDs); n != 1 { if n := len(pendingIDs); n != 1 {
t.Errorf("Redis LIST %q contains %d IDs, want 1", pendingKey, n) t.Errorf("Redis LIST %q contains %d IDs, want 1", pendingKey, n)
continue continue
@ -97,26 +98,26 @@ func TestEnqueue(t *testing.T) {
// Check the value under the task key. // Check the value under the task key.
taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String()) taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String())
encoded := r.client.HGet(ctx, taskKey, "msg").Val() // "msg" field encoded := r.client.HGet(context.Background(), taskKey, "msg").Val() // "msg" field
decoded := h.MustUnmarshal(t, encoded) decoded := h.MustUnmarshal(t, encoded)
if diff := cmp.Diff(tc.msg, decoded); diff != "" { if diff := cmp.Diff(tc.msg, decoded); diff != "" {
t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s", decoded, tc.msg, diff) t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s", decoded, tc.msg, diff)
} }
state := r.client.HGet(ctx, taskKey, "state").Val() // "state" field state := r.client.HGet(context.Background(), taskKey, "state").Val() // "state" field
if state != "pending" { if state != "pending" {
t.Errorf("state field under task-key is set to %q, want %q", state, "pending") t.Errorf("state field under task-key is set to %q, want %q", state, "pending")
} }
timeout := r.client.HGet(ctx, taskKey, "timeout").Val() // "timeout" field timeout := r.client.HGet(context.Background(), taskKey, "timeout").Val() // "timeout" field
if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want { if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want {
t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want) t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want)
} }
deadline := r.client.HGet(ctx, taskKey, "deadline").Val() // "deadline" field deadline := r.client.HGet(context.Background(), taskKey, "deadline").Val() // "deadline" field
if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want { if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want) t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want)
} }
// Check queue is in the AllQueues set. // Check queue is in the AllQueues set.
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() { if !r.client.SIsMember(context.Background(), base.AllQueues, tc.msg.Queue).Val() {
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues) t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
} }
} }
@ -158,13 +159,13 @@ func TestEnqueueUnique(t *testing.T) {
if diff := cmp.Diff(tc.msg, gotPending[0]); diff != "" { if diff := cmp.Diff(tc.msg, gotPending[0]); diff != "" {
t.Errorf("persisted data differed from the original input (-want, +got)\n%s", diff) t.Errorf("persisted data differed from the original input (-want, +got)\n%s", diff)
} }
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() { if !r.client.SIsMember(context.Background(), base.AllQueues, tc.msg.Queue).Val() {
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues) t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
} }
// Check Pending list has task ID. // Check Pending list has task ID.
pendingKey := base.PendingKey(tc.msg.Queue) pendingKey := base.PendingKey(tc.msg.Queue)
pendingIDs := r.client.LRange(ctx, pendingKey, 0, -1).Val() pendingIDs := r.client.LRange(context.Background(), pendingKey, 0, -1).Val()
if len(pendingIDs) != 1 { if len(pendingIDs) != 1 {
t.Errorf("Redis LIST %q contains %d IDs, want 1", pendingKey, len(pendingIDs)) t.Errorf("Redis LIST %q contains %d IDs, want 1", pendingKey, len(pendingIDs))
continue continue
@ -176,30 +177,30 @@ func TestEnqueueUnique(t *testing.T) {
// Check the value under the task key. // Check the value under the task key.
taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String()) taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String())
encoded := r.client.HGet(ctx, taskKey, "msg").Val() // "msg" field encoded := r.client.HGet(context.Background(), taskKey, "msg").Val() // "msg" field
decoded := h.MustUnmarshal(t, encoded) decoded := h.MustUnmarshal(t, encoded)
if diff := cmp.Diff(tc.msg, decoded); diff != "" { if diff := cmp.Diff(tc.msg, decoded); diff != "" {
t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s", decoded, tc.msg, diff) t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s", decoded, tc.msg, diff)
} }
state := r.client.HGet(ctx, taskKey, "state").Val() // "state" field state := r.client.HGet(context.Background(), taskKey, "state").Val() // "state" field
if state != "pending" { if state != "pending" {
t.Errorf("state field under task-key is set to %q, want %q", state, "pending") t.Errorf("state field under task-key is set to %q, want %q", state, "pending")
} }
timeout := r.client.HGet(ctx, taskKey, "timeout").Val() // "timeout" field timeout := r.client.HGet(context.Background(), taskKey, "timeout").Val() // "timeout" field
if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want { if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want {
t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want) t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want)
} }
deadline := r.client.HGet(ctx, taskKey, "deadline").Val() // "deadline" field deadline := r.client.HGet(context.Background(), taskKey, "deadline").Val() // "deadline" field
if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want { if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want) t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want)
} }
uniqueKey := r.client.HGet(ctx, taskKey, "unique_key").Val() // "unique_key" field uniqueKey := r.client.HGet(context.Background(), taskKey, "unique_key").Val() // "unique_key" field
if uniqueKey != tc.msg.UniqueKey { if uniqueKey != tc.msg.UniqueKey {
t.Errorf("uniqueue_key field under task key is set to %q, want %q", uniqueKey, tc.msg.UniqueKey) t.Errorf("uniqueue_key field under task key is set to %q, want %q", uniqueKey, tc.msg.UniqueKey)
} }
// Check queue is in the AllQueues set. // Check queue is in the AllQueues set.
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() { if !r.client.SIsMember(context.Background(), base.AllQueues, tc.msg.Queue).Val() {
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues) t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
} }
@ -209,7 +210,7 @@ func TestEnqueueUnique(t *testing.T) {
t.Errorf("Second message: (*RDB).EnqueueUnique(msg, ttl) = %v, want %v", got, errors.ErrDuplicateTask) t.Errorf("Second message: (*RDB).EnqueueUnique(msg, ttl) = %v, want %v", got, errors.ErrDuplicateTask)
continue continue
} }
gotTTL := r.client.TTL(ctx, tc.msg.UniqueKey).Val() gotTTL := r.client.TTL(context.Background(), tc.msg.UniqueKey).Val()
if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 2)) { if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 2)) {
t.Errorf("TTL %q = %v, want %v", tc.msg.UniqueKey, gotTTL, tc.ttl) t.Errorf("TTL %q = %v, want %v", tc.msg.UniqueKey, gotTTL, tc.ttl)
continue continue
@ -681,7 +682,7 @@ func TestDone(t *testing.T) {
for _, msg := range msgs { for _, msg := range msgs {
// Set uniqueness lock if unique key is present. // Set uniqueness lock if unique key is present.
if len(msg.UniqueKey) > 0 { if len(msg.UniqueKey) > 0 {
err := r.client.SetNX(ctx, msg.UniqueKey, msg.ID.String(), time.Minute).Err() err := r.client.SetNX(context.Background(), msg.UniqueKey, msg.ID.String(), time.Minute).Err()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -711,17 +712,17 @@ func TestDone(t *testing.T) {
} }
processedKey := base.ProcessedKey(tc.target.Queue, time.Now()) processedKey := base.ProcessedKey(tc.target.Queue, time.Now())
gotProcessed := r.client.Get(ctx, processedKey).Val() gotProcessed := r.client.Get(context.Background(), processedKey).Val()
if gotProcessed != "1" { if gotProcessed != "1" {
t.Errorf("%s; GET %q = %q, want 1", tc.desc, processedKey, gotProcessed) t.Errorf("%s; GET %q = %q, want 1", tc.desc, processedKey, gotProcessed)
} }
gotTTL := r.client.TTL(ctx, processedKey).Val() gotTTL := r.client.TTL(context.Background(), processedKey).Val()
if gotTTL > statsTTL { if gotTTL > statsTTL {
t.Errorf("%s; TTL %q = %v, want less than or equal to %v", tc.desc, processedKey, gotTTL, statsTTL) t.Errorf("%s; TTL %q = %v, want less than or equal to %v", tc.desc, processedKey, gotTTL, statsTTL)
} }
if len(tc.target.UniqueKey) > 0 && r.client.Exists(ctx, tc.target.UniqueKey).Val() != 0 { if len(tc.target.UniqueKey) > 0 && r.client.Exists(context.Background(), tc.target.UniqueKey).Val() != 0 {
t.Errorf("%s; Uniqueness lock %q still exists", tc.desc, tc.target.UniqueKey) t.Errorf("%s; Uniqueness lock %q still exists", tc.desc, tc.target.UniqueKey)
} }
} }
@ -899,7 +900,7 @@ func TestSchedule(t *testing.T) {
// Check Scheduled zset has task ID. // Check Scheduled zset has task ID.
scheduledKey := base.ScheduledKey(tc.msg.Queue) scheduledKey := base.ScheduledKey(tc.msg.Queue)
zs := r.client.ZRangeWithScores(ctx, scheduledKey, 0, -1).Val() zs := r.client.ZRangeWithScores(context.Background(), scheduledKey, 0, -1).Val()
if n := len(zs); n != 1 { if n := len(zs); n != 1 {
t.Errorf("Redis ZSET %q contains %d elements, want 1", t.Errorf("Redis ZSET %q contains %d elements, want 1",
scheduledKey, n) scheduledKey, n)
@ -918,28 +919,28 @@ func TestSchedule(t *testing.T) {
// Check the values under the task key. // Check the values under the task key.
taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String()) taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String())
encoded := r.client.HGet(ctx, taskKey, "msg").Val() // "msg" field encoded := r.client.HGet(context.Background(), taskKey, "msg").Val() // "msg" field
decoded := h.MustUnmarshal(t, encoded) decoded := h.MustUnmarshal(t, encoded)
if diff := cmp.Diff(tc.msg, decoded); diff != "" { if diff := cmp.Diff(tc.msg, decoded); diff != "" {
t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s", t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s",
decoded, tc.msg, diff) decoded, tc.msg, diff)
} }
state := r.client.HGet(ctx, taskKey, "state").Val() // "state" field state := r.client.HGet(context.Background(), taskKey, "state").Val() // "state" field
if want := "scheduled"; state != want { if want := "scheduled"; state != want {
t.Errorf("state field under task-key is set to %q, want %q", t.Errorf("state field under task-key is set to %q, want %q",
state, want) state, want)
} }
timeout := r.client.HGet(ctx, taskKey, "timeout").Val() // "timeout" field timeout := r.client.HGet(context.Background(), taskKey, "timeout").Val() // "timeout" field
if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want { if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want {
t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want) t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want)
} }
deadline := r.client.HGet(ctx, taskKey, "deadline").Val() // "deadline" field deadline := r.client.HGet(context.Background(), taskKey, "deadline").Val() // "deadline" field
if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want { if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
t.Errorf("deadline field under task-ke is set to %v, want %v", deadline, want) t.Errorf("deadline field under task-ke is set to %v, want %v", deadline, want)
} }
// Check queue is in the AllQueues set. // Check queue is in the AllQueues set.
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() { if !r.client.SIsMember(context.Background(), base.AllQueues, tc.msg.Queue).Val() {
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues) t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
} }
} }
@ -976,7 +977,7 @@ func TestScheduleUnique(t *testing.T) {
// Check Scheduled zset has task ID. // Check Scheduled zset has task ID.
scheduledKey := base.ScheduledKey(tc.msg.Queue) scheduledKey := base.ScheduledKey(tc.msg.Queue)
zs := r.client.ZRangeWithScores(ctx, scheduledKey, 0, -1).Val() zs := r.client.ZRangeWithScores(context.Background(), scheduledKey, 0, -1).Val()
if n := len(zs); n != 1 { if n := len(zs); n != 1 {
t.Errorf("Redis ZSET %q contains %d elements, want 1", t.Errorf("Redis ZSET %q contains %d elements, want 1",
scheduledKey, n) scheduledKey, n)
@ -995,32 +996,32 @@ func TestScheduleUnique(t *testing.T) {
// Check the values under the task key. // Check the values under the task key.
taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String()) taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String())
encoded := r.client.HGet(ctx, taskKey, "msg").Val() // "msg" field encoded := r.client.HGet(context.Background(), taskKey, "msg").Val() // "msg" field
decoded := h.MustUnmarshal(t, encoded) decoded := h.MustUnmarshal(t, encoded)
if diff := cmp.Diff(tc.msg, decoded); diff != "" { if diff := cmp.Diff(tc.msg, decoded); diff != "" {
t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s", t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s",
decoded, tc.msg, diff) decoded, tc.msg, diff)
} }
state := r.client.HGet(ctx, taskKey, "state").Val() // "state" field state := r.client.HGet(context.Background(), taskKey, "state").Val() // "state" field
if want := "scheduled"; state != want { if want := "scheduled"; state != want {
t.Errorf("state field under task-key is set to %q, want %q", t.Errorf("state field under task-key is set to %q, want %q",
state, want) state, want)
} }
timeout := r.client.HGet(ctx, taskKey, "timeout").Val() // "timeout" field timeout := r.client.HGet(context.Background(), taskKey, "timeout").Val() // "timeout" field
if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want { if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want {
t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want) t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want)
} }
deadline := r.client.HGet(ctx, taskKey, "deadline").Val() // "deadline" field deadline := r.client.HGet(context.Background(), taskKey, "deadline").Val() // "deadline" field
if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want { if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want) t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want)
} }
uniqueKey := r.client.HGet(ctx, taskKey, "unique_key").Val() // "unique_key" field uniqueKey := r.client.HGet(context.Background(), taskKey, "unique_key").Val() // "unique_key" field
if uniqueKey != tc.msg.UniqueKey { if uniqueKey != tc.msg.UniqueKey {
t.Errorf("uniqueue_key field under task key is set to %q, want %q", uniqueKey, tc.msg.UniqueKey) t.Errorf("uniqueue_key field under task key is set to %q, want %q", uniqueKey, tc.msg.UniqueKey)
} }
// Check queue is in the AllQueues set. // Check queue is in the AllQueues set.
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() { if !r.client.SIsMember(context.Background(), base.AllQueues, tc.msg.Queue).Val() {
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues) t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
} }
@ -1031,7 +1032,7 @@ func TestScheduleUnique(t *testing.T) {
continue continue
} }
gotTTL := r.client.TTL(ctx, tc.msg.UniqueKey).Val() gotTTL := r.client.TTL(context.Background(), tc.msg.UniqueKey).Val()
if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) { if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL %q = %v, want %v", tc.msg.UniqueKey, gotTTL, tc.ttl) t.Errorf("TTL %q = %v, want %v", tc.msg.UniqueKey, gotTTL, tc.ttl)
continue continue
@ -1189,21 +1190,21 @@ func TestRetry(t *testing.T) {
} }
processedKey := base.ProcessedKey(tc.msg.Queue, time.Now()) processedKey := base.ProcessedKey(tc.msg.Queue, time.Now())
gotProcessed := r.client.Get(ctx, processedKey).Val() gotProcessed := r.client.Get(context.Background(), processedKey).Val()
if gotProcessed != "1" { if gotProcessed != "1" {
t.Errorf("GET %q = %q, want 1", processedKey, gotProcessed) t.Errorf("GET %q = %q, want 1", processedKey, gotProcessed)
} }
gotTTL := r.client.TTL(ctx, processedKey).Val() gotTTL := r.client.TTL(context.Background(), processedKey).Val()
if gotTTL > statsTTL { if gotTTL > statsTTL {
t.Errorf("TTL %q = %v, want less than or equal to %v", processedKey, gotTTL, statsTTL) t.Errorf("TTL %q = %v, want less than or equal to %v", processedKey, gotTTL, statsTTL)
} }
failedKey := base.FailedKey(tc.msg.Queue, time.Now()) failedKey := base.FailedKey(tc.msg.Queue, time.Now())
gotFailed := r.client.Get(ctx, failedKey).Val() gotFailed := r.client.Get(context.Background(), failedKey).Val()
if gotFailed != "1" { if gotFailed != "1" {
t.Errorf("GET %q = %q, want 1", failedKey, gotFailed) t.Errorf("GET %q = %q, want 1", failedKey, gotFailed)
} }
gotTTL = r.client.TTL(ctx, failedKey).Val() gotTTL = r.client.TTL(context.Background(), failedKey).Val()
if gotTTL > statsTTL { if gotTTL > statsTTL {
t.Errorf("TTL %q = %v, want less than or equal to %v", failedKey, gotTTL, statsTTL) t.Errorf("TTL %q = %v, want less than or equal to %v", failedKey, gotTTL, statsTTL)
} }
@ -1399,21 +1400,21 @@ func TestArchive(t *testing.T) {
} }
processedKey := base.ProcessedKey(tc.target.Queue, time.Now()) processedKey := base.ProcessedKey(tc.target.Queue, time.Now())
gotProcessed := r.client.Get(ctx, processedKey).Val() gotProcessed := r.client.Get(context.Background(), processedKey).Val()
if gotProcessed != "1" { if gotProcessed != "1" {
t.Errorf("GET %q = %q, want 1", processedKey, gotProcessed) t.Errorf("GET %q = %q, want 1", processedKey, gotProcessed)
} }
gotTTL := r.client.TTL(ctx, processedKey).Val() gotTTL := r.client.TTL(context.Background(), processedKey).Val()
if gotTTL > statsTTL { if gotTTL > statsTTL {
t.Errorf("TTL %q = %v, want less than or equal to %v", processedKey, gotTTL, statsTTL) t.Errorf("TTL %q = %v, want less than or equal to %v", processedKey, gotTTL, statsTTL)
} }
failedKey := base.FailedKey(tc.target.Queue, time.Now()) failedKey := base.FailedKey(tc.target.Queue, time.Now())
gotFailed := r.client.Get(ctx, failedKey).Val() gotFailed := r.client.Get(context.Background(), failedKey).Val()
if gotFailed != "1" { if gotFailed != "1" {
t.Errorf("GET %q = %q, want 1", failedKey, gotFailed) t.Errorf("GET %q = %q, want 1", failedKey, gotFailed)
} }
gotTTL = r.client.TTL(ctx, failedKey).Val() gotTTL = r.client.TTL(context.Background(), failedKey).Val()
if gotTTL > statsTTL { if gotTTL > statsTTL {
t.Errorf("TTL %q = %v, want less than or equal to %v", failedKey, gotTTL, statsTTL) t.Errorf("TTL %q = %v, want less than or equal to %v", failedKey, gotTTL, statsTTL)
} }
@ -1683,7 +1684,7 @@ func TestWriteServerState(t *testing.T) {
// Check ServerInfo was written correctly. // Check ServerInfo was written correctly.
skey := base.ServerInfoKey(host, pid, serverID) skey := base.ServerInfoKey(host, pid, serverID)
data := r.client.Get(ctx, skey).Val() data := r.client.Get(context.Background(), skey).Val()
got, err := base.DecodeServerInfo([]byte(data)) got, err := base.DecodeServerInfo([]byte(data))
if err != nil { if err != nil {
t.Fatalf("could not decode server info: %v", err) t.Fatalf("could not decode server info: %v", err)
@ -1693,12 +1694,12 @@ func TestWriteServerState(t *testing.T) {
got, info, diff) got, info, diff)
} }
// Check ServerInfo TTL was set correctly. // Check ServerInfo TTL was set correctly.
gotTTL := r.client.TTL(ctx, skey).Val() gotTTL := r.client.TTL(context.Background(), skey).Val()
if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) { if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl) t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl)
} }
// Check ServerInfo key was added to the set of all server keys correctly. // Check ServerInfo key was added to the set of all server keys correctly.
gotServerKeys := r.client.ZRange(ctx, base.AllServers, 0, -1).Val() gotServerKeys := r.client.ZRange(context.Background(), base.AllServers, 0, -1).Val()
wantServerKeys := []string{skey} wantServerKeys := []string{skey}
if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" { if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" {
t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys) t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys)
@ -1706,12 +1707,12 @@ func TestWriteServerState(t *testing.T) {
// Check WorkersInfo was written correctly. // Check WorkersInfo was written correctly.
wkey := base.WorkersKey(host, pid, serverID) wkey := base.WorkersKey(host, pid, serverID)
workerExist := r.client.Exists(ctx, wkey).Val() workerExist := r.client.Exists(context.Background(), wkey).Val()
if workerExist != 0 { if workerExist != 0 {
t.Errorf("%q key exists", wkey) t.Errorf("%q key exists", wkey)
} }
// Check WorkersInfo key was added to the set correctly. // Check WorkersInfo key was added to the set correctly.
gotWorkerKeys := r.client.ZRange(ctx, base.AllWorkers, 0, -1).Val() gotWorkerKeys := r.client.ZRange(context.Background(), base.AllWorkers, 0, -1).Val()
wantWorkerKeys := []string{wkey} wantWorkerKeys := []string{wkey}
if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" { if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys) t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys)
@ -1773,7 +1774,7 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
// Check ServerInfo was written correctly. // Check ServerInfo was written correctly.
skey := base.ServerInfoKey(host, pid, serverID) skey := base.ServerInfoKey(host, pid, serverID)
data := r.client.Get(ctx, skey).Val() data := r.client.Get(context.Background(), skey).Val()
got, err := base.DecodeServerInfo([]byte(data)) got, err := base.DecodeServerInfo([]byte(data))
if err != nil { if err != nil {
t.Fatalf("could not decode server info: %v", err) t.Fatalf("could not decode server info: %v", err)
@ -1783,12 +1784,12 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
got, serverInfo, diff) got, serverInfo, diff)
} }
// Check ServerInfo TTL was set correctly. // Check ServerInfo TTL was set correctly.
gotTTL := r.client.TTL(ctx, skey).Val() gotTTL := r.client.TTL(context.Background(), skey).Val()
if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) { if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl) t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl)
} }
// Check ServerInfo key was added to the set correctly. // Check ServerInfo key was added to the set correctly.
gotServerKeys := r.client.ZRange(ctx, base.AllServers, 0, -1).Val() gotServerKeys := r.client.ZRange(context.Background(), base.AllServers, 0, -1).Val()
wantServerKeys := []string{skey} wantServerKeys := []string{skey}
if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" { if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" {
t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys) t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys)
@ -1796,7 +1797,7 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
// Check WorkersInfo was written correctly. // Check WorkersInfo was written correctly.
wkey := base.WorkersKey(host, pid, serverID) wkey := base.WorkersKey(host, pid, serverID)
wdata := r.client.HGetAll(ctx, wkey).Val() wdata := r.client.HGetAll(context.Background(), wkey).Val()
if len(wdata) != 2 { if len(wdata) != 2 {
t.Fatalf("HGETALL %q returned a hash of size %d, want 2", wkey, len(wdata)) t.Fatalf("HGETALL %q returned a hash of size %d, want 2", wkey, len(wdata))
} }
@ -1814,12 +1815,12 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
} }
// Check WorkersInfo TTL was set correctly. // Check WorkersInfo TTL was set correctly.
gotTTL = r.client.TTL(ctx, wkey).Val() gotTTL = r.client.TTL(context.Background(), wkey).Val()
if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) { if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL of %q was %v, want %v", wkey, gotTTL, ttl) t.Errorf("TTL of %q was %v, want %v", wkey, gotTTL, ttl)
} }
// Check WorkersInfo key was added to the set correctly. // Check WorkersInfo key was added to the set correctly.
gotWorkerKeys := r.client.ZRange(ctx, base.AllWorkers, 0, -1).Val() gotWorkerKeys := r.client.ZRange(context.Background(), base.AllWorkers, 0, -1).Val()
wantWorkerKeys := []string{wkey} wantWorkerKeys := []string{wkey}
if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" { if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys) t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys)
@ -1909,18 +1910,18 @@ func TestClearServerState(t *testing.T) {
otherSKey := base.ServerInfoKey(otherHost, otherPID, otherServerID) otherSKey := base.ServerInfoKey(otherHost, otherPID, otherServerID)
otherWKey := base.WorkersKey(otherHost, otherPID, otherServerID) otherWKey := base.WorkersKey(otherHost, otherPID, otherServerID)
// Check all keys are cleared. // Check all keys are cleared.
if r.client.Exists(ctx, skey).Val() != 0 { if r.client.Exists(context.Background(), skey).Val() != 0 {
t.Errorf("Redis key %q exists", skey) t.Errorf("Redis key %q exists", skey)
} }
if r.client.Exists(ctx, wkey).Val() != 0 { if r.client.Exists(context.Background(), wkey).Val() != 0 {
t.Errorf("Redis key %q exists", wkey) t.Errorf("Redis key %q exists", wkey)
} }
gotServerKeys := r.client.ZRange(ctx, base.AllServers, 0, -1).Val() gotServerKeys := r.client.ZRange(context.Background(), base.AllServers, 0, -1).Val()
wantServerKeys := []string{otherSKey} wantServerKeys := []string{otherSKey}
if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" { if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" {
t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys) t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys)
} }
gotWorkerKeys := r.client.ZRange(ctx, base.AllWorkers, 0, -1).Val() gotWorkerKeys := r.client.ZRange(context.Background(), base.AllWorkers, 0, -1).Val()
wantWorkerKeys := []string{otherWKey} wantWorkerKeys := []string{otherWKey}
if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" { if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys) t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys)

View File

@ -119,7 +119,7 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
} }
} }
time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed. time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
if l := r.LLen(ctx, base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l) t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
} }
p.shutdown() p.shutdown()
@ -211,7 +211,7 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
// Make sure no messages are stuck in active list. // Make sure no messages are stuck in active list.
for _, qname := range tc.queues { for _, qname := range tc.queues {
if l := r.LLen(ctx, base.ActiveKey(qname)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l) t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
} }
} }
@ -290,7 +290,7 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed. time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
if l := r.LLen(ctx, base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l) t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
} }
p.shutdown() p.shutdown()
@ -439,7 +439,7 @@ func TestProcessorRetry(t *testing.T) {
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff) t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff)
} }
if l := r.LLen(ctx, base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%s: %q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), tc.desc, l) t.Errorf("%s: %q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), tc.desc, l)
} }
@ -593,7 +593,7 @@ func TestProcessorWithStrictPriority(t *testing.T) {
time.Sleep(tc.wait) time.Sleep(tc.wait)
// Make sure no tasks are stuck in active list. // Make sure no tasks are stuck in active list.
for _, qname := range tc.queues { for _, qname := range tc.queues {
if l := r.LLen(ctx, base.ActiveKey(qname)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l) t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
} }
} }

View File

@ -20,8 +20,6 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
var ctx = context.Background()
// migrateCmd represents the migrate command. // migrateCmd represents the migrate command.
var migrateCmd = &cobra.Command{ var migrateCmd = &cobra.Command{
Use: "migrate", Use: "migrate",
@ -41,10 +39,10 @@ func backupKey(key string) string {
} }
func renameKeyAsBackup(c redis.UniversalClient, key string) error { func renameKeyAsBackup(c redis.UniversalClient, key string) error {
if c.Exists(ctx, key).Val() == 0 { if c.Exists(context.Background(), key).Val() == 0 {
return nil // key doesn't exist; no-op return nil // key doesn't exist; no-op
} }
return c.Rename(ctx, key, backupKey(key)).Err() return c.Rename(context.Background(), key, backupKey(key)).Err()
} }
func failIfError(err error, msg string) { func failIfError(err error, msg string) {
@ -90,11 +88,11 @@ func migrate(cmd *cobra.Command, args []string) {
fmt.Print("Renaming pending keys...") fmt.Print("Renaming pending keys...")
for _, qname := range queues { for _, qname := range queues {
oldKey := fmt.Sprintf("asynq:{%s}", qname) oldKey := fmt.Sprintf("asynq:{%s}", qname)
if r.Client().Exists(ctx, oldKey).Val() == 0 { if r.Client().Exists(context.Background(), oldKey).Val() == 0 {
continue continue
} }
newKey := base.PendingKey(qname) newKey := base.PendingKey(qname)
err := r.Client().Rename(ctx, oldKey, newKey).Err() err := r.Client().Rename(context.Background(), oldKey, newKey).Err()
failIfError(err, "Failed to rename key") failIfError(err, "Failed to rename key")
} }
fmt.Print("Done\n") fmt.Print("Done\n")
@ -143,7 +141,7 @@ func migrate(cmd *cobra.Command, args []string) {
backupKey(base.ArchivedKey(qname)), backupKey(base.ArchivedKey(qname)),
} }
for _, key := range keys { for _, key := range keys {
err := r.Client().Del(ctx, key).Err() err := r.Client().Del(context.Background(), key).Err()
failIfError(err, "Failed to delete backup key") failIfError(err, "Failed to delete backup key")
} }
} }
@ -231,7 +229,7 @@ func DecodeMessage(s string) (*OldTaskMessage, error) {
} }
func updatePendingMessages(r *rdb.RDB, qname string) { func updatePendingMessages(r *rdb.RDB, qname string) {
data, err := r.Client().LRange(ctx, backupKey(base.PendingKey(qname)), 0, -1).Result() data, err := r.Client().LRange(context.Background(), backupKey(base.PendingKey(qname)), 0, -1).Result()
failIfError(err, "Failed to read backup pending key") failIfError(err, "Failed to read backup pending key")
for _, s := range data { for _, s := range data {
@ -239,11 +237,11 @@ func updatePendingMessages(r *rdb.RDB, qname string) {
failIfError(err, "Failed to unmarshal message") failIfError(err, "Failed to unmarshal message")
if msg.UniqueKey != "" { if msg.UniqueKey != "" {
ttl, err := r.Client().TTL(ctx, msg.UniqueKey).Result() ttl, err := r.Client().TTL(context.Background(), msg.UniqueKey).Result()
failIfError(err, "Failed to get ttl") failIfError(err, "Failed to get ttl")
if ttl > 0 { if ttl > 0 {
err = r.Client().Del(ctx, msg.UniqueKey).Err() err = r.Client().Del(context.Background(), msg.UniqueKey).Err()
logIfError(err, "Failed to delete unique key") logIfError(err, "Failed to delete unique key")
} }
@ -292,7 +290,7 @@ func ZAddTask(c redis.UniversalClient, key string, msg *base.TaskMessage, score
if err != nil { if err != nil {
return err return err
} }
if err := c.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil { if err := c.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
return err return err
} }
keys := []string{ keys := []string{
@ -307,7 +305,7 @@ func ZAddTask(c redis.UniversalClient, key string, msg *base.TaskMessage, score
msg.Deadline, msg.Deadline,
state, state,
} }
return taskZAddCmd.Run(ctx, c, keys, argv...).Err() return taskZAddCmd.Run(context.Background(), c, keys, argv...).Err()
} }
// KEYS[1] -> unique key // KEYS[1] -> unique key
@ -343,7 +341,7 @@ func ZAddTaskUnique(c redis.UniversalClient, key string, msg *base.TaskMessage,
if err != nil { if err != nil {
return err return err
} }
if err := c.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil { if err := c.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
return err return err
} }
keys := []string{ keys := []string{
@ -360,7 +358,7 @@ func ZAddTaskUnique(c redis.UniversalClient, key string, msg *base.TaskMessage,
msg.Deadline, msg.Deadline,
state, state,
} }
res, err := taskZAddUniqueCmd.Run(ctx, c, keys, argv...).Result() res, err := taskZAddUniqueCmd.Run(context.Background(), c, keys, argv...).Result()
if err != nil { if err != nil {
return err return err
} }
@ -375,7 +373,7 @@ func ZAddTaskUnique(c redis.UniversalClient, key string, msg *base.TaskMessage,
} }
func updateZSetMessages(c redis.UniversalClient, key, state string) { func updateZSetMessages(c redis.UniversalClient, key, state string) {
zs, err := c.ZRangeWithScores(ctx, backupKey(key), 0, -1).Result() zs, err := c.ZRangeWithScores(context.Background(), backupKey(key), 0, -1).Result()
failIfError(err, "Failed to read") failIfError(err, "Failed to read")
for _, z := range zs { for _, z := range zs {
@ -383,11 +381,11 @@ func updateZSetMessages(c redis.UniversalClient, key, state string) {
failIfError(err, "Failed to unmarshal message") failIfError(err, "Failed to unmarshal message")
if msg.UniqueKey != "" { if msg.UniqueKey != "" {
ttl, err := c.TTL(ctx, msg.UniqueKey).Result() ttl, err := c.TTL(context.Background(), msg.UniqueKey).Result()
failIfError(err, "Failed to get ttl") failIfError(err, "Failed to get ttl")
if ttl > 0 { if ttl > 0 {
err = c.Del(ctx, msg.UniqueKey).Err() err = c.Del(context.Background(), msg.UniqueKey).Err()
logIfError(err, "Failed to delete unique key") logIfError(err, "Failed to delete unique key")
} }