Mirror of https://github.com/hibiken/asynq.git (synced 2025-04-22 16:50:18 +08:00)

Commit afd3506be8: upgrade go-redis/redis to version 8
Parent commit: 684a7e0c98
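The headline change in go-redis v8 is that every command method takes a context.Context as its first argument, and this commit threads a context through asynq's own exported API (Client.Enqueue, Scheduler.Register, the Inspector methods) to match. A minimal sketch of the v8 calling convention; the client variable, key, and value below are illustrative and not part of the diff:

package main

import (
    "context"
    "fmt"

    "github.com/go-redis/redis/v8"
)

func main() {
    ctx := context.Background()

    // In v7 commands were called as rdb.Ping(); in v8 the context comes first.
    rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    defer rdb.Close()

    if err := rdb.Ping(ctx).Err(); err != nil {
        fmt.Println("redis not reachable:", err)
        return
    }
    fmt.Println(rdb.Set(ctx, "greeting", "hello", 0).Err())
    fmt.Println(rdb.Get(ctx, "greeting").Val())
}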
asynq.go (2 lines changed)
@@ -12,7 +12,7 @@ import (
    "strings"
    "time"

-   "github.com/go-redis/redis/v7"
+   "github.com/go-redis/redis/v8"
    "github.com/hibiken/asynq/internal/base"
)
@@ -5,12 +5,13 @@
package asynq

import (
    "crypto/tls"
    "flag"
    "sort"
    "strings"
    "testing"

-   "github.com/go-redis/redis/v7"
+   "github.com/go-redis/redis/v8"
    "github.com/google/go-cmp/cmp"
    h "github.com/hibiken/asynq/internal/asynqtest"
    "github.com/hibiken/asynq/internal/log"

@@ -22,11 +23,13 @@ import (

// variables used for package testing.
var (
    redisAddr     string
    redisDB       int
    redisPassword string

    useRedisCluster   bool
    redisClusterAddrs string // comma-separated list of host:port
    redisTLSServer    string

    testLogLevel = FatalLevel
)

@@ -36,9 +39,10 @@ var testLogger *log.Logger
func init() {
    flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing")
    flag.IntVar(&redisDB, "redis_db", 14, "redis db number to use in testing")
    flag.StringVar(&redisPassword, "redis_password", "", "redis password to use in testing")
    flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing")
    flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses")
    flag.Var(&testLogLevel, "loglevel", "log level to use in testing")
    flag.StringVar(&redisTLSServer, "redis_tls_server", "", "redis host for TLS verification")

    testLogger = log.NewLogger(nil)
    testLogger.SetLevel(toInternalLogLevel(testLogLevel))

@@ -53,11 +57,15 @@ func setup(tb testing.TB) (r redis.UniversalClient) {
        }
        r = redis.NewClusterClient(&redis.ClusterOptions{
            Addrs:     addrs,
            Password:  redisPassword,
            TLSConfig: getTLSConfig(),
        })
    } else {
        r = redis.NewClient(&redis.Options{
            Addr:      redisAddr,
            DB:        redisDB,
            Password:  redisPassword,
            TLSConfig: getTLSConfig(),
        })
    }
    // Start each test with a clean slate.

@@ -74,14 +82,25 @@ func getRedisConnOpt(tb testing.TB) RedisConnOpt {
        }
        return RedisClusterClientOpt{
            Addrs:     addrs,
            Password:  redisPassword,
            TLSConfig: getTLSConfig(),
        }
    }
    return RedisClientOpt{
        Addr:      redisAddr,
        DB:        redisDB,
        Password:  redisPassword,
        TLSConfig: getTLSConfig(),
    }
}

func getTLSConfig() *tls.Config {
    if redisTLSServer != "" {
        return &tls.Config{ServerName: redisTLSServer}
    }
    return nil
}

var sortTaskOpt = cmp.Transformer("SortMsg", func(in []*Task) []*Task {
    out := append([]*Task(nil), in...) // Copy input to avoid mutating it
    sort.Slice(out, func(i, j int) bool {
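The test setup above now feeds an optional password and TLS configuration into both the single-node and cluster clients. A self-contained sketch of the same pattern, assuming hypothetical connection values (the tests read theirs from command-line flags):

package main

import (
    "context"
    "crypto/tls"
    "log"

    "github.com/go-redis/redis/v8"
)

// tlsConfigFor returns a *tls.Config only when a server name is set,
// mirroring the getTLSConfig helper in the test setup above.
func tlsConfigFor(serverName string) *tls.Config {
    if serverName != "" {
        return &tls.Config{ServerName: serverName}
    }
    return nil
}

func main() {
    // Hypothetical connection parameters for illustration only.
    r := redis.NewClient(&redis.Options{
        Addr:      "localhost:6379",
        DB:        14,
        Password:  "secret",
        TLSConfig: tlsConfigFor("redis.example.com"),
    })
    defer r.Close()

    // v8 commands take a context; FlushDB gives each test a clean slate.
    if err := r.FlushDB(context.Background()).Err(); err != nil {
        log.Fatal(err)
    }
}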
@@ -41,7 +41,7 @@ func BenchmarkEndToEndSimple(b *testing.B) {
    })
    // Create a bunch of tasks
    for i := 0; i < count; i++ {
-       if _, err := client.Enqueue(makeTask(i)); err != nil {
+       if _, err := client.Enqueue(ctx, makeTask(i)); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }

@@ -81,12 +81,12 @@ func BenchmarkEndToEnd(b *testing.B) {
    })
    // Create a bunch of tasks
    for i := 0; i < count; i++ {
-       if _, err := client.Enqueue(makeTask(i)); err != nil {
+       if _, err := client.Enqueue(ctx, makeTask(i)); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }
    for i := 0; i < count; i++ {
-       if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil {
+       if _, err := client.Enqueue(ctx, makeTask(i), ProcessIn(1*time.Second)); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }

@@ -150,17 +150,17 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
    })
    // Create a bunch of tasks
    for i := 0; i < highCount; i++ {
-       if _, err := client.Enqueue(makeTask(i), Queue("high")); err != nil {
+       if _, err := client.Enqueue(ctx, makeTask(i), Queue("high")); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }
    for i := 0; i < defaultCount; i++ {
-       if _, err := client.Enqueue(makeTask(i)); err != nil {
+       if _, err := client.Enqueue(ctx, makeTask(i)); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }
    for i := 0; i < lowCount; i++ {
-       if _, err := client.Enqueue(makeTask(i), Queue("low")); err != nil {
+       if _, err := client.Enqueue(ctx, makeTask(i), Queue("low")); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }

@@ -201,13 +201,13 @@ func BenchmarkClientWhileServerRunning(b *testing.B) {
    })
    // Enqueue 10,000 tasks.
    for i := 0; i < count; i++ {
-       if _, err := client.Enqueue(makeTask(i)); err != nil {
+       if _, err := client.Enqueue(ctx, makeTask(i)); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }
    // Schedule 10,000 tasks.
    for i := 0; i < count; i++ {
-       if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil {
+       if _, err := client.Enqueue(ctx, makeTask(i), ProcessIn(1*time.Second)); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }

@@ -223,7 +223,7 @@ func BenchmarkClientWhileServerRunning(b *testing.B) {
    enqueued := 0
    for enqueued < 100000 {
        t := NewTask(fmt.Sprintf("enqueued%d", enqueued), h.JSON(map[string]interface{}{"data": enqueued}))
-       if _, err := client.Enqueue(t); err != nil {
+       if _, err := client.Enqueue(ctx, t); err != nil {
            b.Logf("could not enqueue task %d: %v", enqueued, err)
            continue
        }
client.go (21 lines changed)
@@ -5,11 +5,12 @@
package asynq

import (
    "context"
    "fmt"
    "sync"
    "time"

-   "github.com/go-redis/redis/v7"
+   "github.com/go-redis/redis/v8"
    "github.com/google/uuid"
    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/errors"

@@ -265,7 +266,7 @@ func (c *Client) Close() error {
// By default, max retry is set to 25 and timeout is set to 30 minutes.
//
// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
-func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
+func (c *Client) Enqueue(ctx context.Context, task *Task, opts ...Option) (*TaskInfo, error) {
    c.mu.Lock()
    if defaults, ok := c.opts[task.Type()]; ok {
        opts = append(defaults, opts...)

@@ -305,10 +306,10 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
    var state base.TaskState
    if opt.processAt.Before(now) || opt.processAt.Equal(now) {
        opt.processAt = now
-       err = c.enqueue(msg, opt.uniqueTTL)
+       err = c.enqueue(ctx, msg, opt.uniqueTTL)
        state = base.TaskStatePending
    } else {
-       err = c.schedule(msg, opt.processAt, opt.uniqueTTL)
+       err = c.schedule(ctx, msg, opt.processAt, opt.uniqueTTL)
        state = base.TaskStateScheduled
    }
    switch {

@@ -320,17 +321,17 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
    return newTaskInfo(msg, state, opt.processAt), nil
}

-func (c *Client) enqueue(msg *base.TaskMessage, uniqueTTL time.Duration) error {
+func (c *Client) enqueue(ctx context.Context, msg *base.TaskMessage, uniqueTTL time.Duration) error {
    if uniqueTTL > 0 {
-       return c.rdb.EnqueueUnique(msg, uniqueTTL)
+       return c.rdb.EnqueueUnique(ctx, msg, uniqueTTL)
    }
-   return c.rdb.Enqueue(msg)
+   return c.rdb.Enqueue(ctx, msg)
}

-func (c *Client) schedule(msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
+func (c *Client) schedule(ctx context.Context, msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
    if uniqueTTL > 0 {
        ttl := t.Add(uniqueTTL).Sub(time.Now())
-       return c.rdb.ScheduleUnique(msg, t, ttl)
+       return c.rdb.ScheduleUnique(ctx, msg, t, ttl)
    }
-   return c.rdb.Schedule(msg, t)
+   return c.rdb.Schedule(ctx, msg, t)
}
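Because Client.Enqueue gains a context.Context parameter, every call site has to change along with this upgrade. A sketch of the call before and after, assuming the post-upgrade signature shown in this diff; the task type, payload, and Redis address are illustrative:

package main

import (
    "context"
    "log"
    "time"

    "github.com/hibiken/asynq"
)

func main() {
    client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer client.Close()

    task := asynq.NewTask("email:welcome", nil) // illustrative task type and payload

    // Before this commit: info, err := client.Enqueue(task, asynq.ProcessIn(time.Minute))
    // After this commit the context comes first:
    info, err := client.Enqueue(context.Background(), task, asynq.ProcessIn(time.Minute))
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("enqueued task id=%v queue=%v", info.ID, info.Queue)
}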
@@ -5,6 +5,7 @@
package asynq

import (
    "context"
    "errors"
    "testing"
    "time"

@@ -15,6 +16,8 @@ import (
    "github.com/hibiken/asynq/internal/base"
)

var ctx = context.Background()

func TestClientEnqueueWithProcessAtOption(t *testing.T) {
    r := setup(t)
    client := NewClient(getRedisConnOpt(t))
@ -113,7 +116,7 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
|
||||
h.FlushDB(t, r) // clean up db before each test case.
|
||||
|
||||
opts := append(tc.opts, ProcessAt(tc.processAt))
|
||||
gotInfo, err := client.Enqueue(tc.task, opts...)
|
||||
gotInfo, err := client.Enqueue(ctx, tc.task, opts...)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
@ -420,7 +423,7 @@ func TestClientEnqueue(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r) // clean up db before each test case.
|
||||
|
||||
gotInfo, err := client.Enqueue(tc.task, tc.opts...)
|
||||
gotInfo, err := client.Enqueue(ctx, tc.task, tc.opts...)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
@ -537,7 +540,7 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
|
||||
h.FlushDB(t, r) // clean up db before each test case.
|
||||
|
||||
opts := append(tc.opts, ProcessIn(tc.delay))
|
||||
gotInfo, err := client.Enqueue(tc.task, opts...)
|
||||
gotInfo, err := client.Enqueue(ctx, tc.task, opts...)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
@ -590,7 +593,7 @@ func TestClientEnqueueError(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r)
|
||||
|
||||
_, err := client.Enqueue(tc.task, tc.opts...)
|
||||
_, err := client.Enqueue(ctx, tc.task, tc.opts...)
|
||||
if err == nil {
|
||||
t.Errorf("%s; client.Enqueue(task, opts...) did not return non-nil error", tc.desc)
|
||||
}
|
||||
@ -701,7 +704,7 @@ func TestClientDefaultOptions(t *testing.T) {
|
||||
c := NewClient(getRedisConnOpt(t))
|
||||
defer c.Close()
|
||||
c.SetDefaultOptions(tc.task.Type(), tc.defaultOpts...)
|
||||
gotInfo, err := c.Enqueue(tc.task, tc.opts...)
|
||||
gotInfo, err := c.Enqueue(ctx, tc.task, tc.opts...)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -746,19 +749,19 @@ func TestClientEnqueueUnique(t *testing.T) {
|
||||
h.FlushDB(t, r) // clean up db before each test case.
|
||||
|
||||
// Enqueue the task first. It should succeed.
|
||||
_, err := c.Enqueue(tc.task, Unique(tc.ttl))
|
||||
_, err := c.Enqueue(ctx, tc.task, Unique(tc.ttl))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
|
||||
gotTTL := r.TTL(ctx, base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
|
||||
if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
||||
t.Errorf("TTL = %v, want %v", gotTTL, tc.ttl)
|
||||
continue
|
||||
}
|
||||
|
||||
// Enqueue the task again. It should fail.
|
||||
_, err = c.Enqueue(tc.task, Unique(tc.ttl))
|
||||
_, err = c.Enqueue(ctx, tc.task, Unique(tc.ttl))
|
||||
if err == nil {
|
||||
t.Errorf("Enqueueing %+v did not return an error", tc.task)
|
||||
continue
|
||||
@ -791,12 +794,12 @@ func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
|
||||
h.FlushDB(t, r) // clean up db before each test case.
|
||||
|
||||
// Enqueue the task first. It should succeed.
|
||||
_, err := c.Enqueue(tc.task, ProcessIn(tc.d), Unique(tc.ttl))
|
||||
_, err := c.Enqueue(ctx, tc.task, ProcessIn(tc.d), Unique(tc.ttl))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
|
||||
gotTTL := r.TTL(ctx, base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
|
||||
wantTTL := time.Duration(tc.ttl.Seconds()+tc.d.Seconds()) * time.Second
|
||||
if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
||||
t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
|
||||
@ -804,7 +807,7 @@ func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
|
||||
}
|
||||
|
||||
// Enqueue the task again. It should fail.
|
||||
_, err = c.Enqueue(tc.task, ProcessIn(tc.d), Unique(tc.ttl))
|
||||
_, err = c.Enqueue(ctx, tc.task, ProcessIn(tc.d), Unique(tc.ttl))
|
||||
if err == nil {
|
||||
t.Errorf("Enqueueing %+v did not return an error", tc.task)
|
||||
continue
|
||||
@ -837,12 +840,12 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
|
||||
h.FlushDB(t, r) // clean up db before each test case.
|
||||
|
||||
// Enqueue the task first. It should succeed.
|
||||
_, err := c.Enqueue(tc.task, ProcessAt(tc.at), Unique(tc.ttl))
|
||||
_, err := c.Enqueue(ctx, tc.task, ProcessAt(tc.at), Unique(tc.ttl))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
|
||||
gotTTL := r.TTL(ctx, base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
|
||||
wantTTL := tc.at.Add(tc.ttl).Sub(time.Now())
|
||||
if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
||||
t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
|
||||
@ -850,7 +853,7 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
|
||||
}
|
||||
|
||||
// Enqueue the task again. It should fail.
|
||||
_, err = c.Enqueue(tc.task, ProcessAt(tc.at), Unique(tc.ttl))
|
||||
_, err = c.Enqueue(ctx, tc.task, ProcessAt(tc.at), Unique(tc.ttl))
|
||||
if err == nil {
|
||||
t.Errorf("Enqueueing %+v did not return an error", tc.task)
|
||||
continue
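The uniqueness tests above check the lock TTL by querying Redis directly, and under v8 those raw commands need a context too. A small sketch of that style of assertion, with a hypothetical key name (the tests derive theirs from base.UniqueKey):

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/go-redis/redis/v8"
)

func main() {
    ctx := context.Background()
    r := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    defer r.Close()

    // Hypothetical unique-lock key for illustration.
    key := "asynq:{default}:unique:example"
    if err := r.Set(ctx, key, "lock", 30*time.Second).Err(); err != nil {
        fmt.Println("set failed:", err)
        return
    }

    // In v7 this was r.TTL(key).Val(); in v8 the context comes first.
    gotTTL := r.TTL(ctx, key).Val()
    fmt.Printf("remaining TTL: %v\n", gotTTL)
}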
@@ -5,6 +5,7 @@
package asynq_test

import (
    "context"
    "fmt"
    "log"
    "os"

@@ -85,10 +86,10 @@ func ExampleScheduler() {
        &asynq.SchedulerOpts{Location: time.Local},
    )

-   if _, err := scheduler.Register("* * * * *", asynq.NewTask("task1", nil)); err != nil {
+   if _, err := scheduler.Register(context.Background(), "* * * * *", asynq.NewTask("task1", nil)); err != nil {
        log.Fatal(err)
    }
-   if _, err := scheduler.Register("@every 30s", asynq.NewTask("task2", nil)); err != nil {
+   if _, err := scheduler.Register(context.Background(), "@every 30s", asynq.NewTask("task2", nil)); err != nil {
        log.Fatal(err)
    }
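Scheduler.Register picks up the same context-first signature in this commit. A sketch of the registration flow after the upgrade, reusing the cron specs and task names from the example above; the Redis address is an assumption:

package main

import (
    "context"
    "log"
    "time"

    "github.com/hibiken/asynq"
)

func main() {
    scheduler := asynq.NewScheduler(
        asynq.RedisClientOpt{Addr: "localhost:6379"},
        &asynq.SchedulerOpts{Location: time.Local},
    )

    // As of this commit, Register takes a context before the cron spec.
    if _, err := scheduler.Register(context.Background(), "* * * * *", asynq.NewTask("task1", nil)); err != nil {
        log.Fatal(err)
    }
    if _, err := scheduler.Register(context.Background(), "@every 30s", asynq.NewTask("task2", nil)); err != nil {
        log.Fatal(err)
    }

    if err := scheduler.Run(); err != nil {
        log.Fatal(err)
    }
}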
@@ -5,6 +5,7 @@
package asynq

import (
    "context"
    "sync"
    "time"

@@ -69,7 +70,7 @@ func (f *forwarder) start(wg *sync.WaitGroup) {
}

func (f *forwarder) exec() {
-   if err := f.broker.ForwardIfReady(f.queues...); err != nil {
+   if err := f.broker.ForwardIfReady(context.Background(), f.queues...); err != nil {
        f.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
    }
}
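The forwarder is a long-lived background goroutine with no caller to inherit a context from, so the commit passes context.Background() on every tick. A generic sketch of that pattern; the forwardFunc type below is a stand-in, not the real broker interface:

package main

import (
    "context"
    "log"
    "time"
)

// forwardFunc stands in for a broker call such as ForwardIfReady; in asynq the
// real method also takes the queue names. This signature is an assumption.
type forwardFunc func(ctx context.Context) error

// runForwarder calls fn on every tick until done is closed, passing a fresh
// background context because there is no request context to inherit.
func runForwarder(done <-chan struct{}, interval time.Duration, fn forwardFunc) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        select {
        case <-done:
            return
        case <-ticker.C:
            if err := fn(context.Background()); err != nil {
                log.Printf("could not enqueue scheduled tasks: %v", err)
            }
        }
    }
}

func main() {
    done := make(chan struct{})
    go runForwarder(done, time.Second, func(ctx context.Context) error { return nil })
    time.Sleep(3 * time.Second)
    close(done)
}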
go.mod (12 lines changed)

@@ -3,15 +3,17 @@ module github.com/hibiken/asynq
go 1.13

require (
-   github.com/go-redis/redis/v7 v7.4.0
-   github.com/golang/protobuf v1.4.1
-   github.com/google/go-cmp v0.5.0
+   github.com/go-redis/redis/v8 v8.11.0
+   github.com/golang/protobuf v1.4.2
+   github.com/google/go-cmp v0.5.6
    github.com/google/uuid v1.2.0
    github.com/kr/pretty v0.1.0 // indirect
    github.com/robfig/cron/v3 v3.0.1
    github.com/spf13/cast v1.3.1
    github.com/stretchr/testify v1.6.1 // indirect
    go.uber.org/goleak v0.10.0
-   golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e
+   golang.org/x/sys v0.0.0-20210112080510-489259a85091
    golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
    google.golang.org/protobuf v1.25.0
    gopkg.in/yaml.v2 v2.2.7 // indirect
    gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
)
go.sum (90 lines changed)
@ -1,52 +1,58 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
|
||||
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
||||
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
|
||||
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/go-redis/redis/v8 v8.11.0 h1:O1Td0mQ8UFChQ3N9zFQqo6kTU2cJ+/it88gDB+zg0wo=
|
||||
github.com/go-redis/redis/v8 v8.11.0/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4=
|
||||
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
|
||||
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@ -54,38 +60,51 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
|
||||
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@ -93,8 +112,13 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
@ -109,20 +133,20 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
|
||||
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
@@ -5,6 +5,7 @@
package asynq

import (
    "context"
    "sync"
    "time"

@@ -71,7 +72,7 @@ func (hc *healthchecker) start(wg *sync.WaitGroup) {
            timer.Stop()
            return
        case <-timer.C:
-           err := hc.broker.Ping()
+           err := hc.broker.Ping(context.Background())
            hc.healthcheckFunc(err)
            timer.Reset(hc.interval)
        }
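The healthchecker likewise hands context.Background() to broker.Ping. A deployment that wanted the probe itself bounded could derive a context with a deadline, since v8 commands honor cancellation; this sketch talks to a redis client directly and is not how the asynq code above does it:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/go-redis/redis/v8"
)

func main() {
    rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    defer rdb.Close()

    // Bound the health probe to one second; v8 commands respect context deadlines.
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    if err := rdb.Ping(ctx).Err(); err != nil {
        fmt.Println("unhealthy:", err)
        return
    }
    fmt.Println("healthy")
}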
@@ -5,6 +5,7 @@
package asynq

import (
    "context"
    "os"
    "sync"
    "time"

@@ -115,7 +116,7 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
    for {
        select {
        case <-h.done:
-           h.broker.ClearServerState(h.host, h.pid, h.serverID)
+           h.broker.ClearServerState(context.Background(), h.host, h.pid, h.serverID)
            h.logger.Debug("Heartbeater done")
            timer.Stop()
            return

@@ -164,7 +165,7 @@ func (h *heartbeater) beat() {

    // Note: Set TTL to be long enough so that it won't expire before we write again
    // and short enough to expire quickly once the process is shut down or killed.
-   if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil {
+   if err := h.broker.WriteServerState(context.Background(), &info, ws, h.interval*2); err != nil {
        h.logger.Errorf("could not write server state data: %v", err)
    }
}
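The comment above spells out the TTL trade-off for the heartbeat record: long enough to outlive the gap between writes, short enough to disappear soon after a crash, and the commit settles on twice the write interval. A sketch of that choice with the v8 client; the key name, value, and interval are illustrative:

package main

import (
    "context"
    "log"
    "time"

    "github.com/go-redis/redis/v8"
)

func main() {
    ctx := context.Background()
    rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    defer rdb.Close()

    interval := time.Second
    ttl := interval * 2 // expires two missed beats after the process dies

    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for i := 0; i < 3; i++ {
        <-ticker.C
        // Illustrative key; asynq writes an encoded ServerInfo under its own key scheme.
        if err := rdb.Set(ctx, "example:server:state", "alive", ttl).Err(); err != nil {
            log.Printf("could not write server state: %v", err)
        }
    }
}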
@@ -71,7 +71,7 @@ func TestHeartbeater(t *testing.T) {
    // allow for heartbeater to write to redis
    time.Sleep(tc.interval)

-   ss, err := rdbClient.ListServers()
+   ss, err := rdbClient.ListServers(ctx)
    if err != nil {
        t.Errorf("could not read server info from redis: %v", err)
        hb.shutdown()

@@ -97,7 +97,7 @@ func TestHeartbeater(t *testing.T) {
    time.Sleep(tc.interval * 2)

    want.Status = "closed"
-   ss, err = rdbClient.ListServers()
+   ss, err = rdbClient.ListServers(ctx)
    if err != nil {
        t.Errorf("could not read process status from redis: %v", err)
        hb.shutdown()
inspector.go (129 lines changed)
@@ -5,12 +5,13 @@
package asynq

import (
    "context"
    "fmt"
    "strconv"
    "strings"
    "time"

-   "github.com/go-redis/redis/v7"
+   "github.com/go-redis/redis/v8"
    "github.com/google/uuid"
    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/errors"

@@ -40,8 +41,8 @@ func (i *Inspector) Close() error {
}

// Queues returns a list of all queue names.
-func (i *Inspector) Queues() ([]string, error) {
-   return i.rdb.AllQueues()
+func (i *Inspector) Queues(ctx context.Context) ([]string, error) {
+   return i.rdb.AllQueues(ctx)
}

// QueueInfo represents a state of queues at a certain time.
@ -82,11 +83,11 @@ type QueueInfo struct {
|
||||
}
|
||||
|
||||
// GetQueueInfo returns current information of the given queue.
|
||||
func (i *Inspector) GetQueueInfo(qname string) (*QueueInfo, error) {
|
||||
func (i *Inspector) GetQueueInfo(ctx context.Context, qname string) (*QueueInfo, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stats, err := i.rdb.CurrentStats(qname)
|
||||
stats, err := i.rdb.CurrentStats(ctx, qname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -120,11 +121,11 @@ type DailyStats struct {
|
||||
}
|
||||
|
||||
// History returns a list of stats from the last n days.
|
||||
func (i *Inspector) History(qname string, n int) ([]*DailyStats, error) {
|
||||
func (i *Inspector) History(ctx context.Context, qname string, n int) ([]*DailyStats, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stats, err := i.rdb.HistoricalStats(qname, n)
|
||||
stats, err := i.rdb.HistoricalStats(ctx, qname, n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -161,8 +162,8 @@ var (
|
||||
// If the specified queue does not exist, DeleteQueue returns ErrQueueNotFound.
|
||||
// If force is set to false and the specified queue is not empty, DeleteQueue
|
||||
// returns ErrQueueNotEmpty.
|
||||
func (i *Inspector) DeleteQueue(qname string, force bool) error {
|
||||
err := i.rdb.RemoveQueue(qname, force)
|
||||
func (i *Inspector) DeleteQueue(ctx context.Context, qname string, force bool) error {
|
||||
err := i.rdb.RemoveQueue(ctx, qname, force)
|
||||
if errors.IsQueueNotFound(err) {
|
||||
return fmt.Errorf("%w: queue=%q", ErrQueueNotFound, qname)
|
||||
}
|
||||
@ -176,12 +177,12 @@ func (i *Inspector) DeleteQueue(qname string, force bool) error {
|
||||
//
|
||||
// Returns ErrQueueNotFound if a queue with the given name doesn't exist.
|
||||
// Returns ErrTaskNotFound if a task with the given id doesn't exist in the queue.
|
||||
func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) {
|
||||
func (i *Inspector) GetTaskInfo(ctx context.Context, qname, id string) (*TaskInfo, error) {
|
||||
taskid, err := uuid.Parse(id)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("asynq: %s is not a valid task id", id)
|
||||
}
|
||||
info, err := i.rdb.GetTaskInfo(qname, taskid)
|
||||
info, err := i.rdb.GetTaskInfo(ctx, qname, taskid)
|
||||
switch {
|
||||
case errors.IsQueueNotFound(err):
|
||||
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||
@ -257,13 +258,13 @@ func Page(n int) ListOption {
|
||||
// ListPendingTasks retrieves pending tasks from the specified queue.
|
||||
//
|
||||
// By default, it retrieves the first 30 tasks.
|
||||
func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
func (i *Inspector) ListPendingTasks(ctx context.Context, qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return nil, fmt.Errorf("asynq: %v", err)
|
||||
}
|
||||
opt := composeListOptions(opts...)
|
||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||
msgs, err := i.rdb.ListPending(qname, pgn)
|
||||
msgs, err := i.rdb.ListPending(ctx, qname, pgn)
|
||||
switch {
|
||||
case errors.IsQueueNotFound(err):
|
||||
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||
@ -281,13 +282,13 @@ func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*TaskI
|
||||
// ListActiveTasks retrieves active tasks from the specified queue.
|
||||
//
|
||||
// By default, it retrieves the first 30 tasks.
|
||||
func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
func (i *Inspector) ListActiveTasks(ctx context.Context, qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return nil, fmt.Errorf("asynq: %v", err)
|
||||
}
|
||||
opt := composeListOptions(opts...)
|
||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||
msgs, err := i.rdb.ListActive(qname, pgn)
|
||||
msgs, err := i.rdb.ListActive(ctx, qname, pgn)
|
||||
switch {
|
||||
case errors.IsQueueNotFound(err):
|
||||
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||
@ -305,13 +306,13 @@ func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskIn
|
||||
// Tasks are sorted by NextProcessAt in ascending order.
|
||||
//
|
||||
// By default, it retrieves the first 30 tasks.
|
||||
func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
func (i *Inspector) ListScheduledTasks(ctx context.Context, qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return nil, fmt.Errorf("asynq: %v", err)
|
||||
}
|
||||
opt := composeListOptions(opts...)
|
||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||
zs, err := i.rdb.ListScheduled(qname, pgn)
|
||||
zs, err := i.rdb.ListScheduled(ctx, qname, pgn)
|
||||
switch {
|
||||
case errors.IsQueueNotFound(err):
|
||||
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||
@ -333,13 +334,13 @@ func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*Tas
|
||||
// Tasks are sorted by NextProcessAt in ascending order.
|
||||
//
|
||||
// By default, it retrieves the first 30 tasks.
|
||||
func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
func (i *Inspector) ListRetryTasks(ctx context.Context, qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return nil, fmt.Errorf("asynq: %v", err)
|
||||
}
|
||||
opt := composeListOptions(opts...)
|
||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||
zs, err := i.rdb.ListRetry(qname, pgn)
|
||||
zs, err := i.rdb.ListRetry(ctx, qname, pgn)
|
||||
switch {
|
||||
case errors.IsQueueNotFound(err):
|
||||
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||
@ -361,13 +362,13 @@ func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInf
|
||||
// Tasks are sorted by LastFailedAt in descending order.
|
||||
//
|
||||
// By default, it retrieves the first 30 tasks.
|
||||
func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
func (i *Inspector) ListArchivedTasks(ctx context.Context, qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return nil, fmt.Errorf("asynq: %v", err)
|
||||
}
|
||||
opt := composeListOptions(opts...)
|
||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||
zs, err := i.rdb.ListArchived(qname, pgn)
|
||||
zs, err := i.rdb.ListArchived(ctx, qname, pgn)
|
||||
switch {
|
||||
case errors.IsQueueNotFound(err):
|
||||
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||
@ -387,41 +388,41 @@ func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*Task
|
||||
|
||||
// DeleteAllPendingTasks deletes all pending tasks from the specified queue,
|
||||
// and reports the number of tasks deleted.
|
||||
func (i *Inspector) DeleteAllPendingTasks(qname string) (int, error) {
|
||||
func (i *Inspector) DeleteAllPendingTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.DeleteAllPendingTasks(qname)
|
||||
n, err := i.rdb.DeleteAllPendingTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
// DeleteAllScheduledTasks deletes all scheduled tasks from the specified queue,
|
||||
// and reports the number of tasks deleted.
|
||||
func (i *Inspector) DeleteAllScheduledTasks(qname string) (int, error) {
|
||||
func (i *Inspector) DeleteAllScheduledTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.DeleteAllScheduledTasks(qname)
|
||||
n, err := i.rdb.DeleteAllScheduledTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
// DeleteAllRetryTasks deletes all retry tasks from the specified queue,
|
||||
// and reports the number of tasks deleted.
|
||||
func (i *Inspector) DeleteAllRetryTasks(qname string) (int, error) {
|
||||
func (i *Inspector) DeleteAllRetryTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.DeleteAllRetryTasks(qname)
|
||||
n, err := i.rdb.DeleteAllRetryTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
// DeleteAllArchivedTasks deletes all archived tasks from the specified queue,
|
||||
// and reports the number of tasks deleted.
|
||||
func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
|
||||
func (i *Inspector) DeleteAllArchivedTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.DeleteAllArchivedTasks(qname)
|
||||
n, err := i.rdb.DeleteAllArchivedTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
@ -432,7 +433,7 @@ func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
|
||||
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
|
||||
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
|
||||
// If the task is in active state, it returns a non-nil error.
|
||||
func (i *Inspector) DeleteTask(qname, id string) error {
|
||||
func (i *Inspector) DeleteTask(ctx context.Context, qname, id string) error {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return fmt.Errorf("asynq: %v", err)
|
||||
}
|
||||
@ -440,7 +441,7 @@ func (i *Inspector) DeleteTask(qname, id string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("asynq: %s is not a valid task id", id)
|
||||
}
|
||||
err = i.rdb.DeleteTask(qname, taskid)
|
||||
err = i.rdb.DeleteTask(ctx, qname, taskid)
|
||||
switch {
|
||||
case errors.IsQueueNotFound(err):
|
||||
return fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||
@ -455,31 +456,31 @@ func (i *Inspector) DeleteTask(qname, id string) error {
|
||||
|
||||
// RunAllScheduledTasks transitions all scheduled tasks to pending state from the given queue,
|
||||
// and reports the number of tasks transitioned.
|
||||
func (i *Inspector) RunAllScheduledTasks(qname string) (int, error) {
|
||||
func (i *Inspector) RunAllScheduledTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.RunAllScheduledTasks(qname)
|
||||
n, err := i.rdb.RunAllScheduledTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
// RunAllRetryTasks transitions all retry tasks to pending state from the given queue,
|
||||
// and reports the number of tasks transitioned.
|
||||
func (i *Inspector) RunAllRetryTasks(qname string) (int, error) {
|
||||
func (i *Inspector) RunAllRetryTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.RunAllRetryTasks(qname)
|
||||
n, err := i.rdb.RunAllRetryTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
// RunAllArchivedTasks transitions all archived tasks to pending state from the given queue,
|
||||
// and reports the number of tasks transitioned.
|
||||
func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
|
||||
func (i *Inspector) RunAllArchivedTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.RunAllArchivedTasks(qname)
|
||||
n, err := i.rdb.RunAllArchivedTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
@ -490,7 +491,7 @@ func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
|
||||
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
|
||||
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
|
||||
// If the task is in pending or active state, it returns a non-nil error.
|
||||
func (i *Inspector) RunTask(qname, id string) error {
|
||||
func (i *Inspector) RunTask(ctx context.Context, qname, id string) error {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return fmt.Errorf("asynq: %v", err)
|
||||
}
|
||||
@ -498,7 +499,7 @@ func (i *Inspector) RunTask(qname, id string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("asynq: %s is not a valid task id", id)
|
||||
}
|
||||
err = i.rdb.RunTask(qname, taskid)
|
||||
err = i.rdb.RunTask(ctx, qname, taskid)
|
||||
switch {
|
||||
case errors.IsQueueNotFound(err):
|
||||
return fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||
@ -512,31 +513,31 @@ func (i *Inspector) RunTask(qname, id string) error {
|
||||
|
||||
// ArchiveAllPendingTasks archives all pending tasks from the given queue,
|
||||
// and reports the number of tasks archived.
|
||||
func (i *Inspector) ArchiveAllPendingTasks(qname string) (int, error) {
|
||||
func (i *Inspector) ArchiveAllPendingTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.ArchiveAllPendingTasks(qname)
|
||||
n, err := i.rdb.ArchiveAllPendingTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
// ArchiveAllScheduledTasks archives all scheduled tasks from the given queue,
|
||||
// and reports the number of tasks archived.
|
||||
func (i *Inspector) ArchiveAllScheduledTasks(qname string) (int, error) {
|
||||
func (i *Inspector) ArchiveAllScheduledTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.ArchiveAllScheduledTasks(qname)
|
||||
n, err := i.rdb.ArchiveAllScheduledTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
// ArchiveAllRetryTasks archives all retry tasks from the given queue,
|
||||
// and reports the number of tasks archived.
|
||||
func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
|
||||
func (i *Inspector) ArchiveAllRetryTasks(ctx context.Context, qname string) (int, error) {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := i.rdb.ArchiveAllRetryTasks(qname)
|
||||
n, err := i.rdb.ArchiveAllRetryTasks(ctx, qname)
|
||||
return int(n), err
|
||||
}
|
||||
|
||||
@ -547,7 +548,7 @@ func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
|
||||
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
|
||||
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
|
||||
// If the task is already archived, it returns a non-nil error.
|
||||
func (i *Inspector) ArchiveTask(qname, id string) error {
|
||||
func (i *Inspector) ArchiveTask(ctx context.Context, qname, id string) error {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return fmt.Errorf("asynq: err")
|
||||
}
|
||||
@ -555,7 +556,7 @@ func (i *Inspector) ArchiveTask(qname, id string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("asynq: %s is not a valid task id", id)
|
||||
}
|
||||
err = i.rdb.ArchiveTask(qname, taskid)
|
||||
err = i.rdb.ArchiveTask(ctx, qname, taskid)
|
||||
switch {
|
||||
case errors.IsQueueNotFound(err):
|
||||
return fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||
@ -571,35 +572,35 @@ func (i *Inspector) ArchiveTask(qname, id string) error {
|
||||
// given a task id. CancelProcessing is best-effort, which means that it does not
|
||||
// guarantee that the task with the given id will be canceled. The return
|
||||
// value only indicates whether the cancelation signal has been sent.
|
||||
func (i *Inspector) CancelProcessing(id string) error {
|
||||
return i.rdb.PublishCancelation(id)
|
||||
func (i *Inspector) CancelProcessing(ctx context.Context, id string) error {
|
||||
return i.rdb.PublishCancelation(ctx, id)
|
||||
}
|
||||
|
||||
// PauseQueue pauses task processing on the specified queue.
|
||||
// If the queue is already paused, it will return a non-nil error.
|
||||
func (i *Inspector) PauseQueue(qname string) error {
|
||||
func (i *Inspector) PauseQueue(ctx context.Context, qname string) error {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return err
|
||||
}
|
||||
return i.rdb.Pause(qname)
|
||||
return i.rdb.Pause(ctx, qname)
|
||||
}
|
||||
|
||||
// UnpauseQueue resumes task processing on the specified queue.
|
||||
// If the queue is not paused, it will return a non-nil error.
|
||||
func (i *Inspector) UnpauseQueue(qname string) error {
|
||||
func (i *Inspector) UnpauseQueue(ctx context.Context, qname string) error {
|
||||
if err := base.ValidateQueueName(qname); err != nil {
|
||||
return err
|
||||
}
|
||||
return i.rdb.Unpause(qname)
|
||||
return i.rdb.Unpause(ctx, qname)
|
||||
}
|
||||
|
||||
// Servers return a list of running servers' information.
|
||||
func (i *Inspector) Servers() ([]*ServerInfo, error) {
|
||||
servers, err := i.rdb.ListServers()
|
||||
func (i *Inspector) Servers(ctx context.Context) ([]*ServerInfo, error) {
|
||||
servers, err := i.rdb.ListServers(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workers, err := i.rdb.ListWorkers()
|
||||
workers, err := i.rdb.ListWorkers(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -680,8 +681,8 @@ type WorkerInfo struct {
|
||||
}
|
||||
|
||||
// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
|
||||
func (i *Inspector) ClusterKeySlot(qname string) (int64, error) {
|
||||
return i.rdb.ClusterKeySlot(qname)
|
||||
func (i *Inspector) ClusterKeySlot(ctx context.Context, qname string) (int64, error) {
|
||||
return i.rdb.ClusterKeySlot(ctx, qname)
|
||||
}
|
||||
|
||||
// ClusterNode describes a node in redis cluster.
|
||||
@ -696,8 +697,8 @@ type ClusterNode struct {
|
||||
// ClusterNodes returns a list of nodes the given queue belongs to.
|
||||
//
|
||||
// Only relevant if task queues are stored in redis cluster.
|
||||
func (i *Inspector) ClusterNodes(qname string) ([]*ClusterNode, error) {
|
||||
nodes, err := i.rdb.ClusterNodes(qname)
|
||||
func (i *Inspector) ClusterNodes(ctx context.Context, qname string) ([]*ClusterNode, error) {
|
||||
nodes, err := i.rdb.ClusterNodes(ctx, qname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
@ -732,9 +733,9 @@ type SchedulerEntry struct {

// SchedulerEntries returns a list of all entries registered with
// currently running schedulers.
func (i *Inspector) SchedulerEntries() ([]*SchedulerEntry, error) {
func (i *Inspector) SchedulerEntries(ctx context.Context) ([]*SchedulerEntry, error) {
var entries []*SchedulerEntry
res, err := i.rdb.ListSchedulerEntries()
res, err := i.rdb.ListSchedulerEntries(ctx)
if err != nil {
return nil, err
}
@ -839,10 +840,10 @@ type SchedulerEnqueueEvent struct {
// ListSchedulerEnqueueEvents retrieves a list of enqueue events from the specified scheduler entry.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListSchedulerEnqueueEvents(entryID string, opts ...ListOption) ([]*SchedulerEnqueueEvent, error) {
func (i *Inspector) ListSchedulerEnqueueEvents(ctx context.Context, entryID string, opts ...ListOption) ([]*SchedulerEnqueueEvent, error) {
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
data, err := i.rdb.ListSchedulerEnqueueEvents(entryID, pgn)
data, err := i.rdb.ListSchedulerEnqueueEvents(ctx, entryID, pgn)
if err != nil {
return nil, err
}
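ListSchedulerEnqueueEvents keeps its functional list options; only the leading context is new. A hypothetical sketch paging through events for one entry (the field names TaskID and EnqueuedAt are assumptions about SchedulerEnqueueEvent, not shown in this hunk):

// assumes: inspector *asynq.Inspector, ctx context.Context, entryID string, import "log"
events, err := inspector.ListSchedulerEnqueueEvents(ctx, entryID, asynq.PageSize(50), asynq.Page(1))
if err != nil {
	log.Fatalf("ListSchedulerEnqueueEvents: %v", err)
}
for _, e := range events {
	log.Printf("task %s enqueued at %v", e.TaskID, e.EnqueuedAt)
}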

@ -37,11 +37,11 @@ func TestInspectorQueues(t *testing.T) {
for _, tc := range tests {
h.FlushDB(t, r)
for _, qname := range tc.queues {
if err := r.SAdd(base.AllQueues, qname).Err(); err != nil {
if err := r.SAdd(ctx, base.AllQueues, qname).Err(); err != nil {
t.Fatalf("could not initialize all queue set: %v", err)
}
}
got, err := inspector.Queues()
got, err := inspector.Queues(ctx)
if err != nil {
t.Errorf("Queues() returned an error: %v", err)
continue
@ -130,13 +130,13 @@ func TestInspectorDeleteQueue(t *testing.T) {
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived)

err := inspector.DeleteQueue(tc.qname, tc.force)
err := inspector.DeleteQueue(ctx, tc.qname, tc.force)
if err != nil {
t.Errorf("DeleteQueue(%q, %t) = %v, want nil",
tc.qname, tc.force, err)
continue
}
if r.SIsMember(base.AllQueues, tc.qname).Val() {
if r.SIsMember(ctx, base.AllQueues, tc.qname).Val() {
t.Errorf("%q is a member of %q", tc.qname, base.AllQueues)
}
}
@ -190,7 +190,7 @@ func TestInspectorDeleteQueueErrorQueueNotEmpty(t *testing.T) {
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived)

err := inspector.DeleteQueue(tc.qname, tc.force)
err := inspector.DeleteQueue(ctx, tc.qname, tc.force)
if !errors.Is(err, ErrQueueNotEmpty) {
t.Errorf("DeleteQueue(%v, %t) did not return ErrQueueNotEmpty",
tc.qname, tc.force)
@ -246,7 +246,7 @@ func TestInspectorDeleteQueueErrorQueueNotFound(t *testing.T) {
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived)

err := inspector.DeleteQueue(tc.qname, tc.force)
err := inspector.DeleteQueue(ctx, tc.qname, tc.force)
if !errors.Is(err, ErrQueueNotFound) {
t.Errorf("DeleteQueue(%v, %t) did not return ErrQueueNotFound",
tc.qname, tc.force)
@ -345,14 +345,14 @@ func TestInspectorGetQueueInfo(t *testing.T) {
h.SeedAllArchivedQueues(t, r, tc.archived)
for qname, n := range tc.processed {
processedKey := base.ProcessedKey(qname, now)
r.Set(processedKey, n, 0)
r.Set(ctx, processedKey, n, 0)
}
for qname, n := range tc.failed {
failedKey := base.FailedKey(qname, now)
r.Set(failedKey, n, 0)
r.Set(ctx, failedKey, n, 0)
}

got, err := inspector.GetQueueInfo(tc.qname)
got, err := inspector.GetQueueInfo(ctx, tc.qname)
if err != nil {
t.Errorf("r.GetQueueInfo(%q) = %v, %v, want %v, nil",
tc.qname, got, err, tc.want)
@ -385,17 +385,17 @@ func TestInspectorHistory(t *testing.T) {
for _, tc := range tests {
h.FlushDB(t, r)

r.SAdd(base.AllQueues, tc.qname)
r.SAdd(ctx, base.AllQueues, tc.qname)
// populate last n days data
for i := 0; i < tc.n; i++ {
ts := now.Add(-time.Duration(i) * 24 * time.Hour)
processedKey := base.ProcessedKey(tc.qname, ts)
failedKey := base.FailedKey(tc.qname, ts)
r.Set(processedKey, (i+1)*1000, 0)
r.Set(failedKey, (i+1)*10, 0)
r.Set(ctx, processedKey, (i+1)*1000, 0)
r.Set(ctx, failedKey, (i+1)*10, 0)
}

got, err := inspector.History(tc.qname, tc.n)
got, err := inspector.History(ctx, tc.qname, tc.n)
if err != nil {
t.Errorf("Inspector.History(%q, %d) returned error: %v", tc.qname, tc.n, err)
continue
@ -530,7 +530,7 @@ func TestInspectorGetTaskInfo(t *testing.T) {

inspector := NewInspector(getRedisConnOpt(t))
for _, tc := range tests {
got, err := inspector.GetTaskInfo(tc.qname, tc.id)
got, err := inspector.GetTaskInfo(ctx, tc.qname, tc.id)
if err != nil {
t.Errorf("GetTaskInfo(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
@ -615,7 +615,7 @@ func TestInspectorGetTaskInfoError(t *testing.T) {
inspector := NewInspector(getRedisConnOpt(t))

for _, tc := range tests {
info, err := inspector.GetTaskInfo(tc.qname, tc.id)
info, err := inspector.GetTaskInfo(ctx, tc.qname, tc.id)
if info != nil {
t.Errorf("GetTaskInfo(%q, %q) returned info: %v", tc.qname, tc.id, info)
}
@ -680,7 +680,7 @@ func TestInspectorListPendingTasks(t *testing.T) {
h.SeedPendingQueue(t, r, msgs, q)
}

got, err := inspector.ListPendingTasks(tc.qname)
got, err := inspector.ListPendingTasks(ctx, tc.qname)
if err != nil {
t.Errorf("%s; ListPendingTasks(%q) returned error: %v",
tc.desc, tc.qname, err)
@ -731,7 +731,7 @@ func TestInspectorListActiveTasks(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllActiveQueues(t, r, tc.active)

got, err := inspector.ListActiveTasks(tc.qname)
got, err := inspector.ListActiveTasks(ctx, tc.qname)
if err != nil {
t.Errorf("%s; ListActiveTasks(%q) returned error: %v", tc.qname, tc.desc, err)
continue
@ -800,7 +800,7 @@ func TestInspectorListScheduledTasks(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllScheduledQueues(t, r, tc.scheduled)

got, err := inspector.ListScheduledTasks(tc.qname)
got, err := inspector.ListScheduledTasks(ctx, tc.qname)
if err != nil {
t.Errorf("%s; ListScheduledTasks(%q) returned error: %v", tc.desc, tc.qname, err)
continue
@ -870,7 +870,7 @@ func TestInspectorListRetryTasks(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllRetryQueues(t, r, tc.retry)

got, err := inspector.ListRetryTasks(tc.qname)
got, err := inspector.ListRetryTasks(ctx, tc.qname)
if err != nil {
t.Errorf("%s; ListRetryTasks(%q) returned error: %v", tc.desc, tc.qname, err)
continue
@ -939,7 +939,7 @@ func TestInspectorListArchivedTasks(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllArchivedQueues(t, r, tc.archived)

got, err := inspector.ListArchivedTasks(tc.qname)
got, err := inspector.ListArchivedTasks(ctx, tc.qname)
if err != nil {
t.Errorf("%s; ListArchivedTasks(%q) returned error: %v", tc.desc, tc.qname, err)
continue
@ -999,7 +999,7 @@ func TestInspectorListPagination(t *testing.T) {
}

for _, tc := range tests {
got, err := inspector.ListPendingTasks("default", Page(tc.page), PageSize(tc.pageSize))
got, err := inspector.ListPendingTasks(ctx, "default", Page(tc.page), PageSize(tc.pageSize))
if err != nil {
t.Errorf("ListPendingTask('default') returned error: %v", err)
continue
@ -1034,19 +1034,19 @@ func TestInspectorListTasksQueueNotFoundError(t *testing.T) {
for _, tc := range tests {
h.FlushDB(t, r)

if _, err := inspector.ListActiveTasks(tc.qname); !errors.Is(err, tc.wantErr) {
if _, err := inspector.ListActiveTasks(ctx, tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListActiveTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
}
if _, err := inspector.ListPendingTasks(tc.qname); !errors.Is(err, tc.wantErr) {
if _, err := inspector.ListPendingTasks(ctx, tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListPendingTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
}
if _, err := inspector.ListScheduledTasks(tc.qname); !errors.Is(err, tc.wantErr) {
if _, err := inspector.ListScheduledTasks(ctx, tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListScheduledTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
}
if _, err := inspector.ListRetryTasks(tc.qname); !errors.Is(err, tc.wantErr) {
if _, err := inspector.ListRetryTasks(ctx, tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListRetryTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
}
if _, err := inspector.ListArchivedTasks(tc.qname); !errors.Is(err, tc.wantErr) {
if _, err := inspector.ListArchivedTasks(ctx, tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListArchivedTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
}
}
@ -1098,7 +1098,7 @@ func TestInspectorDeleteAllPendingTasks(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllPendingQueues(t, r, tc.pending)

got, err := inspector.DeleteAllPendingTasks(tc.qname)
got, err := inspector.DeleteAllPendingTasks(ctx, tc.qname)
if err != nil {
t.Errorf("DeleteAllPendingTasks(%q) returned error: %v", tc.qname, err)
continue
@ -1165,7 +1165,7 @@ func TestInspectorDeleteAllScheduledTasks(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllScheduledQueues(t, r, tc.scheduled)

got, err := inspector.DeleteAllScheduledTasks(tc.qname)
got, err := inspector.DeleteAllScheduledTasks(ctx, tc.qname)
if err != nil {
t.Errorf("DeleteAllScheduledTasks(%q) returned error: %v", tc.qname, err)
continue
@ -1231,7 +1231,7 @@ func TestInspectorDeleteAllRetryTasks(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllRetryQueues(t, r, tc.retry)

got, err := inspector.DeleteAllRetryTasks(tc.qname)
got, err := inspector.DeleteAllRetryTasks(ctx, tc.qname)
if err != nil {
t.Errorf("DeleteAllRetryTasks(%q) returned error: %v", tc.qname, err)
continue
@ -1297,7 +1297,7 @@ func TestInspectorDeleteAllArchivedTasks(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllArchivedQueues(t, r, tc.archived)

got, err := inspector.DeleteAllArchivedTasks(tc.qname)
got, err := inspector.DeleteAllArchivedTasks(ctx, tc.qname)
if err != nil {
t.Errorf("DeleteAllArchivedTasks(%q) returned error: %v", tc.qname, err)
continue
@ -1401,7 +1401,7 @@ func TestInspectorArchiveAllPendingTasks(t *testing.T) {
h.SeedAllPendingQueues(t, r, tc.pending)
h.SeedAllArchivedQueues(t, r, tc.archived)

got, err := inspector.ArchiveAllPendingTasks(tc.qname)
got, err := inspector.ArchiveAllPendingTasks(ctx, tc.qname)
if err != nil {
t.Errorf("ArchiveAllPendingTasks(%q) returned error: %v", tc.qname, err)
continue
@ -1534,7 +1534,7 @@ func TestInspectorArchiveAllScheduledTasks(t *testing.T) {
h.SeedAllScheduledQueues(t, r, tc.scheduled)
h.SeedAllArchivedQueues(t, r, tc.archived)

got, err := inspector.ArchiveAllScheduledTasks(tc.qname)
got, err := inspector.ArchiveAllScheduledTasks(ctx, tc.qname)
if err != nil {
t.Errorf("ArchiveAllScheduledTasks(%q) returned error: %v", tc.qname, err)
continue
@ -1651,7 +1651,7 @@ func TestInspectorArchiveAllRetryTasks(t *testing.T) {
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived)

got, err := inspector.ArchiveAllRetryTasks(tc.qname)
got, err := inspector.ArchiveAllRetryTasks(ctx, tc.qname)
if err != nil {
t.Errorf("ArchiveAllRetryTasks(%q) returned error: %v", tc.qname, err)
continue
@ -1769,7 +1769,7 @@ func TestInspectorRunAllScheduledTasks(t *testing.T) {
h.SeedAllScheduledQueues(t, r, tc.scheduled)
h.SeedAllPendingQueues(t, r, tc.pending)

got, err := inspector.RunAllScheduledTasks(tc.qname)
got, err := inspector.RunAllScheduledTasks(ctx, tc.qname)
if err != nil {
t.Errorf("RunAllScheduledTasks(%q) returned error: %v", tc.qname, err)
continue
@ -1886,7 +1886,7 @@ func TestInspectorRunAllRetryTasks(t *testing.T) {
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllPendingQueues(t, r, tc.pending)

got, err := inspector.RunAllRetryTasks(tc.qname)
got, err := inspector.RunAllRetryTasks(ctx, tc.qname)
if err != nil {
t.Errorf("RunAllRetryTasks(%q) returned error: %v", tc.qname, err)
continue
@ -1999,7 +1999,7 @@ func TestInspectorRunAllArchivedTasks(t *testing.T) {
h.SeedAllArchivedQueues(t, r, tc.archived)
h.SeedAllPendingQueues(t, r, tc.pending)

got, err := inspector.RunAllArchivedTasks(tc.qname)
got, err := inspector.RunAllArchivedTasks(ctx, tc.qname)
if err != nil {
t.Errorf("RunAllArchivedTasks(%q) returned error: %v", tc.qname, err)
continue
@ -2067,7 +2067,7 @@ func TestInspectorDeleteTaskDeletesPendingTask(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllPendingQueues(t, r, tc.pending)

if err := inspector.DeleteTask(tc.qname, tc.id); err != nil {
if err := inspector.DeleteTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("DeleteTask(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
}
@ -2120,7 +2120,7 @@ func TestInspectorDeleteTaskDeletesScheduledTask(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllScheduledQueues(t, r, tc.scheduled)

if err := inspector.DeleteTask(tc.qname, tc.id); err != nil {
if err := inspector.DeleteTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("DeleteTask(%q, %q) returned error: %v", tc.qname, tc.id, err)
}
for qname, want := range tc.wantScheduled {
@ -2170,7 +2170,7 @@ func TestInspectorDeleteTaskDeletesRetryTask(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllRetryQueues(t, r, tc.retry)

if err := inspector.DeleteTask(tc.qname, tc.id); err != nil {
if err := inspector.DeleteTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("DeleteTask(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
}
@ -2220,7 +2220,7 @@ func TestInspectorDeleteTaskDeletesArchivedTask(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllArchivedQueues(t, r, tc.archived)

if err := inspector.DeleteTask(tc.qname, tc.id); err != nil {
if err := inspector.DeleteTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("DeleteTask(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
}
@ -2285,7 +2285,7 @@ func TestInspectorDeleteTaskError(t *testing.T) {
h.FlushDB(t, r)
h.SeedAllArchivedQueues(t, r, tc.archived)

if err := inspector.DeleteTask(tc.qname, tc.id); !errors.Is(err, tc.wantErr) {
if err := inspector.DeleteTask(ctx, tc.qname, tc.id); !errors.Is(err, tc.wantErr) {
t.Errorf("DeleteTask(%q, %q) = %v, want %v", tc.qname, tc.id, err, tc.wantErr)
continue
}
@ -2346,7 +2346,7 @@ func TestInspectorRunTaskRunsScheduledTask(t *testing.T) {
h.SeedAllScheduledQueues(t, r, tc.scheduled)
h.SeedAllPendingQueues(t, r, tc.pending)

if err := inspector.RunTask(tc.qname, tc.id); err != nil {
if err := inspector.RunTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("RunTask(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
}
@ -2416,7 +2416,7 @@ func TestInspectorRunTaskRunsRetryTask(t *testing.T) {
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllPendingQueues(t, r, tc.pending)

if err := inspector.RunTask(tc.qname, tc.id); err != nil {
if err := inspector.RunTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("RunTaskBy(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
}
@ -2489,7 +2489,7 @@ func TestInspectorRunTaskRunsArchivedTask(t *testing.T) {
h.SeedAllArchivedQueues(t, r, tc.archived)
h.SeedAllPendingQueues(t, r, tc.pending)

if err := inspector.RunTask(tc.qname, tc.id); err != nil {
if err := inspector.RunTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("RunTask(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
}
@ -2589,7 +2589,7 @@ func TestInspectorRunTaskError(t *testing.T) {
h.SeedAllArchivedQueues(t, r, tc.archived)
h.SeedAllPendingQueues(t, r, tc.pending)

if err := inspector.RunTask(tc.qname, tc.id); !errors.Is(err, tc.wantErr) {
if err := inspector.RunTask(ctx, tc.qname, tc.id); !errors.Is(err, tc.wantErr) {
t.Errorf("RunTask(%q, %q) = %v, want %v", tc.qname, tc.id, err, tc.wantErr)
continue
}
@ -2678,7 +2678,7 @@ func TestInspectorArchiveTaskArchivesPendingTask(t *testing.T) {
h.SeedAllPendingQueues(t, r, tc.pending)
h.SeedAllArchivedQueues(t, r, tc.archived)

if err := inspector.ArchiveTask(tc.qname, tc.id); err != nil {
if err := inspector.ArchiveTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("ArchiveTask(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
}
@ -2754,7 +2754,7 @@ func TestInspectorArchiveTaskArchivesScheduledTask(t *testing.T) {
h.SeedAllScheduledQueues(t, r, tc.scheduled)
h.SeedAllArchivedQueues(t, r, tc.archived)

if err := inspector.ArchiveTask(tc.qname, tc.id); err != nil {
if err := inspector.ArchiveTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("ArchiveTask(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
}
@ -2829,7 +2829,7 @@ func TestInspectorArchiveTaskArchivesRetryTask(t *testing.T) {
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived)

if err := inspector.ArchiveTask(tc.qname, tc.id); err != nil {
if err := inspector.ArchiveTask(ctx, tc.qname, tc.id); err != nil {
t.Errorf("ArchiveTask(%q, %q) returned error: %v", tc.qname, tc.id, err)
continue
}
@ -2921,7 +2921,7 @@ func TestInspectorArchiveTaskError(t *testing.T) {
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived)

if err := inspector.ArchiveTask(tc.qname, tc.id); !errors.Is(err, tc.wantErr) {
if err := inspector.ArchiveTask(ctx, tc.qname, tc.id); !errors.Is(err, tc.wantErr) {
t.Errorf("ArchiveTask(%q, %q) = %v, want %v", tc.qname, tc.id, err, tc.wantErr)
continue
}
@ -3002,11 +3002,11 @@ func TestInspectorSchedulerEntries(t *testing.T) {

for _, tc := range tests {
h.FlushDB(t, r)
err := rdbClient.WriteSchedulerEntries(schedulerID, tc.data, time.Minute)
err := rdbClient.WriteSchedulerEntries(ctx, schedulerID, tc.data, time.Minute)
if err != nil {
t.Fatalf("could not write data: %v", err)
}
got, err := inspector.SchedulerEntries()
got, err := inspector.SchedulerEntries(ctx)
if err != nil {
t.Errorf("SchedulerEntries() returned error: %v", err)
continue

@ -6,19 +6,22 @@
package asynqtest

import (
"context"
"encoding/json"
"math"
"sort"
"testing"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
)

var ctx = context.Background()

// EquateInt64Approx returns a Comparer option that treats int64 values
// to be equal if they are within the given margin.
func EquateInt64Approx(margin int64) cmp.Option {
@ -165,12 +168,12 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
tb.Helper()
switch r := r.(type) {
case *redis.Client:
if err := r.FlushDB().Err(); err != nil {
if err := r.FlushDB(ctx).Err(); err != nil {
tb.Fatal(err)
}
case *redis.ClusterClient:
err := r.ForEachMaster(func(c *redis.Client) error {
if err := c.FlushAll().Err(); err != nil {
err := r.ForEachMaster(ctx, func(ctx context.Context, c *redis.Client) error {
if err := c.FlushAll(ctx).Err(); err != nil {
return err
}
return nil
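The ForEachMaster hunk reflects a go-redis v8 API change rather than an asynq one: the iterator takes a context and also passes one to its callback. Shown standalone below for clarity (client setup is illustrative):

// go-redis v8: both the call and the callback carry a context.
// assumes: import ("context"; "log"; "github.com/go-redis/redis/v8")
ctx := context.Background()
rc := redis.NewClusterClient(&redis.ClusterOptions{Addrs: []string{"localhost:7000"}})
err := rc.ForEachMaster(ctx, func(ctx context.Context, c *redis.Client) error {
	return c.FlushAll(ctx).Err()
})
if err != nil {
	log.Fatalf("flush cluster masters: %v", err)
}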
@ -184,42 +187,42 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
// SeedPendingQueue initializes the specified queue with the given messages.
func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(ctx, base.AllQueues, qname)
seedRedisList(tb, r, base.PendingKey(qname), msgs, base.TaskStatePending)
}

// SeedActiveQueue initializes the active queue with the given messages.
func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(ctx, base.AllQueues, qname)
seedRedisList(tb, r, base.ActiveKey(qname), msgs, base.TaskStateActive)
}

// SeedScheduledQueue initializes the scheduled queue with the given messages.
func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(ctx, base.AllQueues, qname)
seedRedisZSet(tb, r, base.ScheduledKey(qname), entries, base.TaskStateScheduled)
}

// SeedRetryQueue initializes the retry queue with the given messages.
func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(ctx, base.AllQueues, qname)
seedRedisZSet(tb, r, base.RetryKey(qname), entries, base.TaskStateRetry)
}

// SeedArchivedQueue initializes the archived queue with the given messages.
func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(ctx, base.AllQueues, qname)
seedRedisZSet(tb, r, base.ArchivedKey(qname), entries, base.TaskStateArchived)
}

// SeedDeadlines initializes the deadlines set with the given entries.
func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
r.SAdd(ctx, base.AllQueues, qname)
seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries, base.TaskStateActive)
}

@ -278,7 +281,7 @@ func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
tb.Helper()
for _, msg := range msgs {
encoded := MustMarshal(tb, msg)
if err := c.LPush(key, msg.ID.String()).Err(); err != nil {
if err := c.LPush(ctx, key, msg.ID.String()).Err(); err != nil {
tb.Fatal(err)
}
key := base.TaskKey(msg.Queue, msg.ID.String())
@ -289,11 +292,11 @@ func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
"deadline": msg.Deadline,
"unique_key": msg.UniqueKey,
}
if err := c.HSet(key, data).Err(); err != nil {
if err := c.HSet(context.Background(), key, data).Err(); err != nil {
tb.Fatal(err)
}
if len(msg.UniqueKey) > 0 {
err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
if err != nil {
tb.Fatalf("Failed to set unique lock in redis: %v", err)
}
@ -308,7 +311,7 @@ func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
msg := item.Message
encoded := MustMarshal(tb, msg)
z := &redis.Z{Member: msg.ID.String(), Score: float64(item.Score)}
if err := c.ZAdd(key, z).Err(); err != nil {
if err := c.ZAdd(ctx, key, z).Err(); err != nil {
tb.Fatal(err)
}
key := base.TaskKey(msg.Queue, msg.ID.String())
@ -319,11 +322,11 @@ func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
"deadline": msg.Deadline,
"unique_key": msg.UniqueKey,
}
if err := c.HSet(key, data).Err(); err != nil {
if err := c.HSet(ctx, key, data).Err(); err != nil {
tb.Fatal(err)
}
if len(msg.UniqueKey) > 0 {
err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
err := c.SetNX(ctx, msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
if err != nil {
tb.Fatalf("Failed to set unique lock in redis: %v", err)
}
@ -398,13 +401,13 @@ func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) [
func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
tb.Helper()
ids := r.LRange(keyFn(qname), 0, -1).Val()
ids := r.LRange(ctx, keyFn(qname), 0, -1).Val()
var msgs []*base.TaskMessage
for _, id := range ids {
taskKey := base.TaskKey(qname, id)
data := r.HGet(taskKey, "msg").Val()
data := r.HGet(ctx, taskKey, "msg").Val()
msgs = append(msgs, MustUnmarshal(tb, data))
if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
if gotState := r.HGet(ctx, taskKey, "state").Val(); gotState != state.String() {
tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
}
}
@ -415,13 +418,13 @@ func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
tb.Helper()
ids := r.ZRange(keyFn(qname), 0, -1).Val()
ids := r.ZRange(ctx, keyFn(qname), 0, -1).Val()
var msgs []*base.TaskMessage
for _, id := range ids {
taskKey := base.TaskKey(qname, id)
msg := r.HGet(taskKey, "msg").Val()
msg := r.HGet(ctx, taskKey, "msg").Val()
msgs = append(msgs, MustUnmarshal(tb, msg))
if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
if gotState := r.HGet(ctx, taskKey, "state").Val(); gotState != state.String() {
tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
}
}
@ -432,14 +435,14 @@ func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
func getMessagesFromZSetWithScores(tb testing.TB, r redis.UniversalClient,
qname string, keyFn func(qname string) string, state base.TaskState) []base.Z {
tb.Helper()
zs := r.ZRangeWithScores(keyFn(qname), 0, -1).Val()
zs := r.ZRangeWithScores(ctx, keyFn(qname), 0, -1).Val()
var res []base.Z
for _, z := range zs {
taskID := z.Member.(string)
taskKey := base.TaskKey(qname, taskID)
msg := r.HGet(taskKey, "msg").Val()
msg := r.HGet(ctx, taskKey, "msg").Val()
res = append(res, base.Z{Message: MustUnmarshal(tb, msg), Score: int64(z.Score)})
if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
if gotState := r.HGet(ctx, taskKey, "state").Val(); gotState != state.String() {
tb.Errorf("task (id=%q) is in %q state, want %v", taskID, gotState, state)
}
}

@ -14,7 +14,7 @@ import (
"sync"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/golang/protobuf/ptypes"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/errors"
@ -637,21 +637,21 @@ func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
//
// See rdb.RDB as a reference implementation.
type Broker interface {
Ping() error
Enqueue(msg *TaskMessage) error
EnqueueUnique(msg *TaskMessage, ttl time.Duration) error
Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
Done(msg *TaskMessage) error
Requeue(msg *TaskMessage) error
Schedule(msg *TaskMessage, processAt time.Time) error
ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
Retry(msg *TaskMessage, processAt time.Time, errMsg string) error
Archive(msg *TaskMessage, errMsg string) error
ForwardIfReady(qnames ...string) error
ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
ClearServerState(host string, pid int, serverID string) error
CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
PublishCancelation(id string) error
Ping(context.Context) error
Enqueue(ctx context.Context, msg *TaskMessage) error
EnqueueUnique(ctx context.Context, msg *TaskMessage, ttl time.Duration) error
Dequeue(ctx context.Context, qnames ...string) (*TaskMessage, time.Time, error)
Done(ctx context.Context, msg *TaskMessage) error
Requeue(ctx context.Context, msg *TaskMessage) error
Schedule(ctx context.Context, msg *TaskMessage, processAt time.Time) error
ScheduleUnique(ctx context.Context, msg *TaskMessage, processAt time.Time, ttl time.Duration) error
Retry(ctx context.Context, msg *TaskMessage, processAt time.Time, errMsg string) error
Archive(ctx context.Context, msg *TaskMessage, errMsg string) error
ForwardIfReady(ctx context.Context, qnames ...string) error
ListDeadlineExceeded(ctx context.Context, deadline time.Time, qnames ...string) ([]*TaskMessage, error)
WriteServerState(ctx context.Context, info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
ClearServerState(ctx context.Context, host string, pid int, serverID string) error
CancelationPubSub(ctx context.Context) (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
PublishCancelation(ctx context.Context, id string) error
Close() error
}
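Because Broker is what the server's internal loops program against, every implementation and every call site must thread the context through explicitly; there is no variadic escape hatch. A hedged sketch of how a polling loop might consume the new signatures; the loop, timeout, and process helper are illustrative, not the server's actual code:

// assumes: b base.Broker, qnames []string, process func(*base.TaskMessage)
// import ("context"; "time")
for {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	msg, deadline, err := b.Dequeue(ctx, qnames...)
	cancel()
	if err != nil {
		time.Sleep(time.Second) // back off on empty queues or transient errors
		continue
	}
	_ = deadline // the task's processing deadline, unused in this sketch
	process(msg) // hypothetical handler
}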

@ -23,7 +23,7 @@ func BenchmarkEnqueue(b *testing.B) {
asynqtest.FlushDB(b, r.client)
b.StartTimer()

if err := r.Enqueue(msg); err != nil {
if err := r.Enqueue(ctx, msg); err != nil {
b.Fatalf("Enqueue failed: %v", err)
}
}
@ -45,7 +45,7 @@ func BenchmarkEnqueueUnique(b *testing.B) {
asynqtest.FlushDB(b, r.client)
b.StartTimer()

if err := r.EnqueueUnique(msg, uniqueTTL); err != nil {
if err := r.EnqueueUnique(ctx, msg, uniqueTTL); err != nil {
b.Fatalf("EnqueueUnique failed: %v", err)
}
}
@ -62,7 +62,7 @@ func BenchmarkSchedule(b *testing.B) {
asynqtest.FlushDB(b, r.client)
b.StartTimer()

if err := r.Schedule(msg, processAt); err != nil {
if err := r.Schedule(ctx, msg, processAt); err != nil {
b.Fatalf("Schedule failed: %v", err)
}
}
@ -85,7 +85,7 @@ func BenchmarkScheduleUnique(b *testing.B) {
asynqtest.FlushDB(b, r.client)
b.StartTimer()

if err := r.ScheduleUnique(msg, processAt, uniqueTTL); err != nil {
if err := r.ScheduleUnique(ctx, msg, processAt, uniqueTTL); err != nil {
b.Fatalf("EnqueueUnique failed: %v", err)
}
}
@ -101,13 +101,13 @@ func BenchmarkDequeueSingleQueue(b *testing.B) {
for i := 0; i < 10; i++ {
m := asynqtest.NewTaskMessageWithQueue(
fmt.Sprintf("task%d", i), nil, base.DefaultQueueName)
if err := r.Enqueue(m); err != nil {
if err := r.Enqueue(ctx, m); err != nil {
b.Fatalf("Enqueue failed: %v", err)
}
}
b.StartTimer()

if _, _, err := r.Dequeue(base.DefaultQueueName); err != nil {
if _, _, err := r.Dequeue(ctx, base.DefaultQueueName); err != nil {
b.Fatalf("Dequeue failed: %v", err)
}
}
@ -125,14 +125,14 @@ func BenchmarkDequeueMultipleQueues(b *testing.B) {
for _, qname := range qnames {
m := asynqtest.NewTaskMessageWithQueue(
fmt.Sprintf("%s_task%d", qname, i), nil, qname)
if err := r.Enqueue(m); err != nil {
if err := r.Enqueue(ctx, m); err != nil {
b.Fatalf("Enqueue failed: %v", err)
}
}
}
b.StartTimer()

if _, _, err := r.Dequeue(qnames...); err != nil {
if _, _, err := r.Dequeue(ctx, qnames...); err != nil {
b.Fatalf("Dequeue failed: %v", err)
}
}
@ -158,7 +158,7 @@ func BenchmarkDone(b *testing.B) {
asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
b.StartTimer()

if err := r.Done(msgs[0]); err != nil {
if err := r.Done(ctx, msgs[0]); err != nil {
b.Fatalf("Done failed: %v", err)
}
}
@ -184,7 +184,7 @@ func BenchmarkRetry(b *testing.B) {
asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
b.StartTimer()

if err := r.Retry(msgs[0], time.Now().Add(1*time.Minute), "error"); err != nil {
if err := r.Retry(ctx, msgs[0], time.Now().Add(1*time.Minute), "error"); err != nil {
b.Fatalf("Retry failed: %v", err)
}
}
@ -210,7 +210,7 @@ func BenchmarkArchive(b *testing.B) {
asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
b.StartTimer()

if err := r.Archive(msgs[0], "error"); err != nil {
if err := r.Archive(ctx, msgs[0], "error"); err != nil {
b.Fatalf("Archive failed: %v", err)
}
}
@ -236,7 +236,7 @@ func BenchmarkRequeue(b *testing.B) {
asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
b.StartTimer()

if err := r.Requeue(msgs[0]); err != nil {
if err := r.Requeue(ctx, msgs[0]); err != nil {
b.Fatalf("Requeue failed: %v", err)
}
}
@ -259,7 +259,7 @@ func BenchmarkCheckAndEnqueue(b *testing.B) {
asynqtest.SeedScheduledQueue(b, r.client, zs, base.DefaultQueueName)
b.StartTimer()

if err := r.ForwardIfReady(base.DefaultQueueName); err != nil {
if err := r.ForwardIfReady(ctx, base.DefaultQueueName); err != nil {
b.Fatalf("ForwardIfReady failed: %v", err)
}
}

@ -5,11 +5,12 @@
package rdb

import (
"context"
"fmt"
"strings"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
@ -17,8 +18,8 @@ import (
)

// AllQueues returns a list of all queue names.
func (r *RDB) AllQueues() ([]string, error) {
return r.client.SMembers(base.AllQueues).Result()
func (r *RDB) AllQueues(ctx context.Context) ([]string, error) {
return r.client.SMembers(ctx, base.AllQueues).Result()
}
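This is the mechanical core of the v7 to v8 upgrade: in go-redis v8 every command method takes a context as its first argument, so even one-line wrappers like AllQueues change. A generic sketch of the before/after call shape (the key name and client setup are illustrative):

// assumes: client redis.UniversalClient, import ("context"; "log")
// go-redis v7:  client.SMembers("asynq:queues").Result()
// go-redis v8:
ctx := context.Background()
queues, err := client.SMembers(ctx, "asynq:queues").Result()
if err != nil {
	log.Fatalf("SMembers: %v", err)
}
log.Println("queues:", queues)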

// Stats represents a state of queues at a certain time.
@ -100,9 +101,9 @@ table.insert(res, redis.call("EXISTS", KEYS[8]))
return res`)

// CurrentStats returns a current state of the queues.
func (r *RDB) CurrentStats(qname string) (*Stats, error) {
func (r *RDB) CurrentStats(ctx context.Context, qname string) (*Stats, error) {
var op errors.Op = "rdb.CurrentStats"
exists, err := r.client.SIsMember(base.AllQueues, qname).Result()
exists, err := r.client.SIsMember(ctx, base.AllQueues, qname).Result()
if err != nil {
return nil, errors.E(op, errors.Unknown, err)
}
@ -110,7 +111,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
}
now := time.Now()
res, err := currentStatsCmd.Run(r.client, []string{
res, err := currentStatsCmd.Run(ctx, r.client, []string{
base.PendingKey(qname),
base.ActiveKey(qname),
base.ScheduledKey(qname),
@ -164,7 +165,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
}
}
stats.Size = size
memusg, err := r.memoryUsage(qname)
memusg, err := r.memoryUsage(ctx, qname)
if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err)
}
@ -172,7 +173,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
return stats, nil
}

func (r *RDB) memoryUsage(qname string) (int64, error) {
func (r *RDB) memoryUsage(ctx context.Context, qname string) (int64, error) {
var op errors.Op = "rdb.memoryUsage"
var (
keys []string
@ -181,7 +182,7 @@ func (r *RDB) memoryUsage(qname string) (int64, error) {
err error
)
for {
data, cursor, err = r.client.Scan(cursor, fmt.Sprintf("asynq:{%s}*", qname), 100).Result()
data, cursor, err = r.client.Scan(ctx, cursor, fmt.Sprintf("asynq:{%s}*", qname), 100).Result()
if err != nil {
return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "scan", Err: err})
}
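memoryUsage walks every key under the queue's hash tag with cursor-based SCAN; in v8 the only signature difference is the leading context. A standalone sketch of the same iteration pattern (the match pattern and page size are illustrative):

// assumes: client redis.UniversalClient, ctx context.Context, import "log"
var cursor uint64
for {
	keys, next, err := client.Scan(ctx, cursor, "asynq:{default}*", 100).Result()
	if err != nil {
		log.Fatalf("scan: %v", err)
	}
	for _, k := range keys {
		_ = k // e.g. accumulate MemoryUsage(ctx, k) per key
	}
	if next == 0 {
		break // a returned cursor of 0 means the scan is complete
	}
	cursor = next
}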
@ -192,7 +193,7 @@ func (r *RDB) memoryUsage(qname string) (int64, error) {
}
var usg int64
for _, k := range keys {
n, err := r.client.MemoryUsage(k).Result()
n, err := r.client.MemoryUsage(ctx, k).Result()
if err != nil {
return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "memory usage", Err: err})
}
@ -213,12 +214,12 @@ end
return res`)

// HistoricalStats returns a list of stats from the last n days for the given queue.
func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
func (r *RDB) HistoricalStats(ctx context.Context, qname string, n int) ([]*DailyStats, error) {
var op errors.Op = "rdb.HistoricalStats"
if n < 1 {
return nil, errors.E(op, errors.FailedPrecondition, "the number of days must be positive")
}
exists, err := r.client.SIsMember(base.AllQueues, qname).Result()
exists, err := r.client.SIsMember(ctx, base.AllQueues, qname).Result()
if err != nil {
return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
}
@ -235,7 +236,7 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
keys = append(keys, base.ProcessedKey(qname, ts))
keys = append(keys, base.FailedKey(qname, ts))
}
res, err := historicalStatsCmd.Run(r.client, keys).Result()
res, err := historicalStatsCmd.Run(ctx, r.client, keys).Result()
if err != nil {
return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
}
@ -256,8 +257,8 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
}

// RedisInfo returns a map of redis info.
func (r *RDB) RedisInfo() (map[string]string, error) {
res, err := r.client.Info().Result()
func (r *RDB) RedisInfo(ctx context.Context) (map[string]string, error) {
res, err := r.client.Info(ctx).Result()
if err != nil {
return nil, err
}
@ -265,8 +266,8 @@ func (r *RDB) RedisInfo() (map[string]string, error) {
}

// RedisClusterInfo returns a map of redis cluster info.
func (r *RDB) RedisClusterInfo() (map[string]string, error) {
res, err := r.client.ClusterInfo().Result()
func (r *RDB) RedisClusterInfo(ctx context.Context) (map[string]string, error) {
res, err := r.client.ClusterInfo(ctx).Result()
if err != nil {
return nil, err
}
@ -294,8 +295,8 @@ func reverse(x []string) {

// checkQueueExists verifies whether the queue exists.
// It returns QueueNotFoundError if queue doesn't exist.
func (r *RDB) checkQueueExists(qname string) error {
exists, err := r.client.SIsMember(base.AllQueues, qname).Result()
func (r *RDB) checkQueueExists(ctx context.Context, qname string) error {
exists, err := r.client.SIsMember(ctx, base.AllQueues, qname).Result()
if err != nil {
return errors.E(errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
}
@ -333,9 +334,9 @@ var getTaskInfoCmd = redis.NewScript(`
`)

// GetTaskInfo returns a TaskInfo describing the task from the given queue.
func (r *RDB) GetTaskInfo(qname string, id uuid.UUID) (*base.TaskInfo, error) {
func (r *RDB) GetTaskInfo(ctx context.Context, qname string, id uuid.UUID) (*base.TaskInfo, error) {
var op errors.Op = "rdb.GetTaskInfo"
if err := r.checkQueueExists(qname); err != nil {
if err := r.checkQueueExists(ctx, qname); err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err)
}
keys := []string{base.TaskKey(qname, id.String())}
@ -344,7 +345,7 @@ func (r *RDB) GetTaskInfo(qname string, id uuid.UUID) (*base.TaskInfo, error) {
time.Now().Unix(),
base.QueueKeyPrefix(qname),
}
res, err := getTaskInfoCmd.Run(r.client, keys, argv...).Result()
res, err := getTaskInfoCmd.Run(ctx, r.client, keys, argv...).Result()
if err != nil {
if err.Error() == "NOT FOUND" {
return nil, errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id.String()})
@ -408,12 +409,12 @@ func (p Pagination) stop() int64 {
}

// ListPending returns pending tasks that are ready to be processed.
func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, error) {
func (r *RDB) ListPending(ctx context.Context, qname string, pgn Pagination) ([]*base.TaskMessage, error) {
var op errors.Op = "rdb.ListPending"
if !r.client.SIsMember(base.AllQueues, qname).Val() {
if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
}
res, err := r.listMessages(base.PendingKey(qname), qname, pgn)
res, err := r.listMessages(ctx, base.PendingKey(qname), qname, pgn)
if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err)
}
@ -421,12 +422,12 @@ func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, er
}

// ListActive returns all tasks that are currently being processed for the given queue.
func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskMessage, error) {
func (r *RDB) ListActive(ctx context.Context, qname string, pgn Pagination) ([]*base.TaskMessage, error) {
var op errors.Op = "rdb.ListActive"
if !r.client.SIsMember(base.AllQueues, qname).Val() {
if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
}
res, err := r.listMessages(base.ActiveKey(qname), qname, pgn)
res, err := r.listMessages(ctx, base.ActiveKey(qname), qname, pgn)
if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err)
}
@ -448,12 +449,12 @@ return res
`)

// listMessages returns a list of TaskMessage in Redis list with the given key.
func (r *RDB) listMessages(key, qname string, pgn Pagination) ([]*base.TaskMessage, error) {
func (r *RDB) listMessages(ctx context.Context, key, qname string, pgn Pagination) ([]*base.TaskMessage, error) {
// Note: Because we use LPUSH to redis list, we need to calculate the
// correct range and reverse the list to get the tasks with pagination.
stop := -pgn.start() - 1
start := -pgn.stop() - 1
res, err := listMessagesCmd.Run(r.client,
res, err := listMessagesCmd.Run(ctx, r.client,
[]string{key}, start, stop, base.TaskKeyPrefix(qname)).Result()
if err != nil {
return nil, errors.E(errors.Unknown, err)
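The note in listMessages is worth unpacking: because tasks are LPUSHed, the newest element sits at index 0, so a page is addressed from the tail with negative indices and then reversed. Assuming Pagination.start() is page*size and stop() is start+size-1 (as referenced by the hunk headers above), page 0 of size 20 maps to LRANGE key -20 -1 and page 1 to LRANGE key -40 -21. A small illustrative sketch of that index arithmetic, not the library's code:

// Illustrative only: mirrors the start/stop arithmetic used by listMessages.
func redisRange(page, size int64) (start, stop int64) {
	first := page * size         // Pagination.start()
	last := first + size - 1     // Pagination.stop()
	return -last - 1, -first - 1 // LRANGE indices counted from the tail
}
// redisRange(0, 20) == (-20, -1); redisRange(1, 20) == (-40, -21)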
@ -477,12 +478,12 @@ func (r *RDB) listMessages(key, qname string, pgn Pagination) ([]*base.TaskMessa

// ListScheduled returns all tasks from the given queue that are scheduled
// to be processed in the future.
func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]base.Z, error) {
func (r *RDB) ListScheduled(ctx context.Context, qname string, pgn Pagination) ([]base.Z, error) {
var op errors.Op = "rdb.ListScheduled"
if !r.client.SIsMember(base.AllQueues, qname).Val() {
if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
}
res, err := r.listZSetEntries(base.ScheduledKey(qname), qname, pgn)
res, err := r.listZSetEntries(ctx, base.ScheduledKey(qname), qname, pgn)
if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err)
}
@ -491,12 +492,12 @@ func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]base.Z, error) {

// ListRetry returns all tasks from the given queue that have failed before
// and willl be retried in the future.
func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) {
func (r *RDB) ListRetry(ctx context.Context, qname string, pgn Pagination) ([]base.Z, error) {
var op errors.Op = "rdb.ListRetry"
if !r.client.SIsMember(base.AllQueues, qname).Val() {
if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
}
res, err := r.listZSetEntries(base.RetryKey(qname), qname, pgn)
res, err := r.listZSetEntries(ctx, base.RetryKey(qname), qname, pgn)
if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err)
}
@ -504,12 +505,12 @@ func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) {
}

// ListArchived returns all tasks from the given queue that have exhausted its retry limit.
func (r *RDB) ListArchived(qname string, pgn Pagination) ([]base.Z, error) {
func (r *RDB) ListArchived(ctx context.Context, qname string, pgn Pagination) ([]base.Z, error) {
var op errors.Op = "rdb.ListArchived"
if !r.client.SIsMember(base.AllQueues, qname).Val() {
if !r.client.SIsMember(ctx, base.AllQueues, qname).Val() {
return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
}
zs, err := r.listZSetEntries(base.ArchivedKey(qname), qname, pgn)
zs, err := r.listZSetEntries(ctx, base.ArchivedKey(qname), qname, pgn)
if err != nil {
return nil, errors.E(op, errors.CanonicalCode(err), err)
}
@ -536,8 +537,8 @@ return res

// listZSetEntries returns a list of message and score pairs in Redis sorted-set
// with the given key.
func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, error) {
res, err := listZSetEntriesCmd.Run(r.client, []string{key},
func (r *RDB) listZSetEntries(ctx context.Context, key, qname string, pgn Pagination) ([]base.Z, error) {
res, err := listZSetEntriesCmd.Run(ctx, r.client, []string{key},
pgn.start(), pgn.stop(), base.TaskKeyPrefix(qname)).Result()
if err != nil {
return nil, errors.E(errors.Unknown, err)
@ -568,9 +569,9 @@ func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, erro
// RunAllScheduledTasks enqueues all scheduled tasks from the given queue
// and returns the number of tasks enqueued.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) RunAllScheduledTasks(qname string) (int64, error) {
func (r *RDB) RunAllScheduledTasks(ctx context.Context, qname string) (int64, error) {
var op errors.Op = "rdb.RunAllScheduledTasks"
n, err := r.runAll(base.ScheduledKey(qname), qname)
n, err := r.runAll(ctx, base.ScheduledKey(qname), qname)
if errors.IsQueueNotFound(err) {
return 0, errors.E(op, errors.NotFound, err)
}
@ -583,9 +584,9 @@ func (r *RDB) RunAllScheduledTasks(qname string) (int64, error) {
// RunAllRetryTasks enqueues all retry tasks from the given queue
// and returns the number of tasks enqueued.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) RunAllRetryTasks(qname string) (int64, error) {
func (r *RDB) RunAllRetryTasks(ctx context.Context, qname string) (int64, error) {
var op errors.Op = "rdb.RunAllRetryTasks"
n, err := r.runAll(base.RetryKey(qname), qname)
n, err := r.runAll(ctx, base.RetryKey(qname), qname)
if errors.IsQueueNotFound(err) {
return 0, errors.E(op, errors.NotFound, err)
}
@ -598,9 +599,9 @@ func (r *RDB) RunAllRetryTasks(qname string) (int64, error) {
// RunAllArchivedTasks enqueues all archived tasks from the given queue
// and returns the number of tasks enqueued.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) RunAllArchivedTasks(qname string) (int64, error) {
func (r *RDB) RunAllArchivedTasks(ctx context.Context, qname string) (int64, error) {
var op errors.Op = "rdb.RunAllArchivedTasks"
n, err := r.runAll(base.ArchivedKey(qname), qname)
n, err := r.runAll(ctx, base.ArchivedKey(qname), qname)
if errors.IsQueueNotFound(err) {
return 0, errors.E(op, errors.NotFound, err)
}
@ -651,9 +652,9 @@ return 1
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
// If a task with the given id doesn't exist in the queue, it returns TaskNotFoundError
// If a task is in active or pending state it returns non-nil error with Code FailedPrecondition.
func (r *RDB) RunTask(qname string, id uuid.UUID) error {
func (r *RDB) RunTask(ctx context.Context, qname string, id uuid.UUID) error {
var op errors.Op = "rdb.RunTask"
if err := r.checkQueueExists(qname); err != nil {
if err := r.checkQueueExists(ctx, qname); err != nil {
return errors.E(op, errors.CanonicalCode(err), err)
}
keys := []string{
@ -664,7 +665,7 @@ func (r *RDB) RunTask(qname string, id uuid.UUID) error {
id.String(),
base.QueueKeyPrefix(qname),
}
res, err := runTaskCmd.Run(r.client, keys, argv...).Result()
res, err := runTaskCmd.Run(ctx, r.client, keys, argv...).Result()
if err != nil {
return errors.E(op, errors.Unknown, err)
}
@ -706,8 +707,8 @@ end
redis.call("DEL", KEYS[1])
return table.getn(ids)`)
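The Lua helpers change in lockstep: redis.NewScript is unchanged, but Script.Run in go-redis v8 takes the context before the client. A minimal hedged sketch of the new call shape (the script body and key are illustrative, not taken from this commit):

// assumes: client redis.UniversalClient, ctx context.Context, import ("log"; "github.com/go-redis/redis/v8")
var countList = redis.NewScript(`return redis.call("LLEN", KEYS[1])`)

// go-redis v7: countList.Run(client, []string{"asynq:{default}:pending"})
// go-redis v8:
n, err := countList.Run(ctx, client, []string{"asynq:{default}:pending"}).Int64()
if err != nil {
	log.Fatalf("eval: %v", err)
}
log.Println("pending:", n)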

func (r *RDB) runAll(zset, qname string) (int64, error) {
if err := r.checkQueueExists(qname); err != nil {
func (r *RDB) runAll(ctx context.Context, zset, qname string) (int64, error) {
if err := r.checkQueueExists(ctx, qname); err != nil {
return 0, err
}
keys := []string{
@ -717,7 +718,7 @@ func (r *RDB) runAll(zset, qname string) (int64, error) {
argv := []interface{}{
base.TaskKeyPrefix(qname),
}
res, err := runAllCmd.Run(r.client, keys, argv...).Result()
res, err := runAllCmd.Run(ctx, r.client, keys, argv...).Result()
if err != nil {
return 0, err
}
@ -734,9 +735,9 @@ func (r *RDB) runAll(zset, qname string) (int64, error) {
// ArchiveAllRetryTasks archives all retry tasks from the given queue and
// returns the number of tasks that were moved.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) ArchiveAllRetryTasks(qname string) (int64, error) {
func (r *RDB) ArchiveAllRetryTasks(ctx context.Context, qname string) (int64, error) {
var op errors.Op = "rdb.ArchiveAllRetryTasks"
n, err := r.archiveAll(base.RetryKey(qname), base.ArchivedKey(qname), qname)
n, err := r.archiveAll(ctx, base.RetryKey(qname), base.ArchivedKey(qname), qname)
if errors.IsQueueNotFound(err) {
return 0, errors.E(op, errors.NotFound, err)
}
@ -749,9 +750,9 @@ func (r *RDB) ArchiveAllRetryTasks(qname string) (int64, error) {
// ArchiveAllScheduledTasks archives all scheduled tasks from the given queue and
// returns the number of tasks that were moved.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) ArchiveAllScheduledTasks(qname string) (int64, error) {
func (r *RDB) ArchiveAllScheduledTasks(ctx context.Context, qname string) (int64, error) {
var op errors.Op = "rdb.ArchiveAllScheduledTasks"
n, err := r.archiveAll(base.ScheduledKey(qname), base.ArchivedKey(qname), qname)
n, err := r.archiveAll(ctx, base.ScheduledKey(qname), base.ArchivedKey(qname), qname)
if errors.IsQueueNotFound(err) {
return 0, errors.E(op, errors.NotFound, err)
}
@ -789,9 +790,9 @@ return table.getn(ids)`)
// ArchiveAllPendingTasks archives all pending tasks from the given queue and
// returns the number of tasks moved.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) ArchiveAllPendingTasks(qname string) (int64, error) {
func (r *RDB) ArchiveAllPendingTasks(ctx context.Context, qname string) (int64, error) {
var op errors.Op = "rdb.ArchiveAllPendingTasks"
if err := r.checkQueueExists(qname); err != nil {
if err := r.checkQueueExists(ctx, qname); err != nil {
return 0, errors.E(op, errors.CanonicalCode(err), err)
}
keys := []string{
@ -805,7 +806,7 @@ func (r *RDB) ArchiveAllPendingTasks(qname string) (int64, error) {
maxArchiveSize,
base.TaskKeyPrefix(qname),
}
res, err := archiveAllPendingCmd.Run(r.client, keys, argv...).Result()
res, err := archiveAllPendingCmd.Run(ctx, r.client, keys, argv...).Result()
if err != nil {
return 0, errors.E(op, errors.Internal, err)
}
@ -869,9 +870,9 @@ return 1
// If a task with the given id doesn't exist in the queue, it returns TaskNotFoundError
// If a task is already archived, it returns TaskAlreadyArchivedError.
// If a task is in active state it returns non-nil error with FailedPrecondition code.
func (r *RDB) ArchiveTask(qname string, id uuid.UUID) error {
func (r *RDB) ArchiveTask(ctx context.Context, qname string, id uuid.UUID) error {
var op errors.Op = "rdb.ArchiveTask"
if err := r.checkQueueExists(qname); err != nil {
if err := r.checkQueueExists(ctx, qname); err != nil {
return errors.E(op, errors.CanonicalCode(err), err)
}
keys := []string{
@ -886,7 +887,7 @@ func (r *RDB) ArchiveTask(qname string, id uuid.UUID) error {
maxArchiveSize,
base.QueueKeyPrefix(qname),
}
res, err := archiveTaskCmd.Run(r.client, keys, argv...).Result()
res, err := archiveTaskCmd.Run(ctx, r.client, keys, argv...).Result()
if err != nil {
return errors.E(op, errors.Unknown, err)
}
@ -935,8 +936,8 @@ redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[3])
redis.call("DEL", KEYS[1])
return table.getn(ids)`)

func (r *RDB) archiveAll(src, dst, qname string) (int64, error) {
if err := r.checkQueueExists(qname); err != nil {
func (r *RDB) archiveAll(ctx context.Context, src, dst, qname string) (int64, error) {
if err := r.checkQueueExists(ctx, qname); err != nil {
return 0, err
}
keys := []string{
@ -951,7 +952,7 @@ func (r *RDB) archiveAll(src, dst, qname string) (int64, error) {
base.TaskKeyPrefix(qname),
qname,
}
res, err := archiveAllCmd.Run(r.client, keys, argv...).Result()
res, err := archiveAllCmd.Run(ctx, r.client, keys, argv...).Result()
if err != nil {
return 0, err
}
@ -1006,9 +1007,9 @@ return redis.call("DEL", KEYS[1])
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
// If a task with the given id doesn't exist in the queue, it returns TaskNotFoundError
// If a task is in active state it returns non-nil error with Code FailedPrecondition.
func (r *RDB) DeleteTask(qname string, id uuid.UUID) error {
func (r *RDB) DeleteTask(ctx context.Context, qname string, id uuid.UUID) error {
var op errors.Op = "rdb.DeleteTask"
if err := r.checkQueueExists(qname); err != nil {
if err := r.checkQueueExists(ctx, qname); err != nil {
return errors.E(op, errors.CanonicalCode(err), err)
}
keys := []string{
@ -1018,7 +1019,7 @@ func (r *RDB) DeleteTask(qname string, id uuid.UUID) error {
id.String(),
base.QueueKeyPrefix(qname),
}
res, err := deleteTaskCmd.Run(r.client, keys, argv...).Result()
res, err := deleteTaskCmd.Run(ctx, r.client, keys, argv...).Result()
if err != nil {
return errors.E(op, errors.Unknown, err)
}
@ -1040,9 +1041,9 @@ func (r *RDB) DeleteTask(qname string, id uuid.UUID) error {

// DeleteAllArchivedTasks deletes all archived tasks from the given queue
// and returns the number of tasks deleted.
func (r *RDB) DeleteAllArchivedTasks(qname string) (int64, error) {
func (r *RDB) DeleteAllArchivedTasks(ctx context.Context, qname string) (int64, error) {
var op errors.Op = "rdb.DeleteAllArchivedTasks"
n, err := r.deleteAll(base.ArchivedKey(qname), qname)
n, err := r.deleteAll(ctx, base.ArchivedKey(qname), qname)
if errors.IsQueueNotFound(err) {
return 0, errors.E(op, errors.NotFound, err)
}
@ -1054,9 +1055,9 @@ func (r *RDB) DeleteAllArchivedTasks(qname string) (int64, error) {

// DeleteAllRetryTasks deletes all retry tasks from the given queue
// and returns the number of tasks deleted.
func (r *RDB) DeleteAllRetryTasks(qname string) (int64, error) {
func (r *RDB) DeleteAllRetryTasks(ctx context.Context, qname string) (int64, error) {
var op errors.Op = "rdb.DeleteAllRetryTasks"
n, err := r.deleteAll(base.RetryKey(qname), qname)
|
||||
n, err := r.deleteAll(ctx, base.RetryKey(qname), qname)
|
||||
if errors.IsQueueNotFound(err) {
|
||||
return 0, errors.E(op, errors.NotFound, err)
|
||||
}
|
||||
@ -1068,9 +1069,9 @@ func (r *RDB) DeleteAllRetryTasks(qname string) (int64, error) {
|
||||
|
||||
// DeleteAllScheduledTasks deletes all scheduled tasks from the given queue
|
||||
// and returns the number of tasks deleted.
|
||||
func (r *RDB) DeleteAllScheduledTasks(qname string) (int64, error) {
|
||||
func (r *RDB) DeleteAllScheduledTasks(ctx context.Context, qname string) (int64, error) {
|
||||
var op errors.Op = "rdb.DeleteAllScheduledTasks"
|
||||
n, err := r.deleteAll(base.ScheduledKey(qname), qname)
|
||||
n, err := r.deleteAll(ctx, base.ScheduledKey(qname), qname)
|
||||
if errors.IsQueueNotFound(err) {
|
||||
return 0, errors.E(op, errors.NotFound, err)
|
||||
}
|
||||
@ -1102,15 +1103,15 @@ end
|
||||
redis.call("DEL", KEYS[1])
|
||||
return table.getn(ids)`)
|
||||
|
||||
func (r *RDB) deleteAll(key, qname string) (int64, error) {
|
||||
if err := r.checkQueueExists(qname); err != nil {
|
||||
func (r *RDB) deleteAll(ctx context.Context, key, qname string) (int64, error) {
|
||||
if err := r.checkQueueExists(ctx, qname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
argv := []interface{}{
|
||||
base.TaskKeyPrefix(qname),
|
||||
qname,
|
||||
}
|
||||
res, err := deleteAllCmd.Run(r.client, []string{key}, argv...).Result()
|
||||
res, err := deleteAllCmd.Run(ctx, r.client, []string{key}, argv...).Result()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -1140,9 +1141,9 @@ return table.getn(ids)`)
|
||||
|
||||
// DeleteAllPendingTasks deletes all pending tasks from the given queue
|
||||
// and returns the number of tasks deleted.
|
||||
func (r *RDB) DeleteAllPendingTasks(qname string) (int64, error) {
|
||||
func (r *RDB) DeleteAllPendingTasks(ctx context.Context, qname string) (int64, error) {
|
||||
var op errors.Op = "rdb.DeleteAllPendingTasks"
|
||||
if err := r.checkQueueExists(qname); err != nil {
|
||||
if err := r.checkQueueExists(ctx, qname); err != nil {
|
||||
return 0, errors.E(op, errors.CanonicalCode(err), err)
|
||||
}
|
||||
keys := []string{
|
||||
@ -1151,7 +1152,7 @@ func (r *RDB) DeleteAllPendingTasks(qname string) (int64, error) {
|
||||
argv := []interface{}{
|
||||
base.TaskKeyPrefix(qname),
|
||||
}
|
||||
res, err := deleteAllPendingCmd.Run(r.client, keys, argv...).Result()
|
||||
res, err := deleteAllPendingCmd.Run(ctx, r.client, keys, argv...).Result()
|
||||
if err != nil {
|
||||
return 0, errors.E(op, errors.Unknown, err)
|
||||
}
|
||||
@ -1280,9 +1281,9 @@ return 1`)
|
||||
// as long as no tasks are active for the queue.
|
||||
// If force is set to false, it will only remove the queue if
|
||||
// the queue is empty.
|
||||
func (r *RDB) RemoveQueue(qname string, force bool) error {
|
||||
func (r *RDB) RemoveQueue(ctx context.Context, qname string, force bool) error {
|
||||
var op errors.Op = "rdb.RemoveQueue"
|
||||
exists, err := r.client.SIsMember(base.AllQueues, qname).Result()
|
||||
exists, err := r.client.SIsMember(ctx, base.AllQueues, qname).Result()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1303,7 +1304,7 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
|
||||
base.ArchivedKey(qname),
|
||||
base.DeadlinesKey(qname),
|
||||
}
|
||||
res, err := script.Run(r.client, keys, base.TaskKeyPrefix(qname)).Result()
|
||||
res, err := script.Run(ctx, r.client, keys, base.TaskKeyPrefix(qname)).Result()
|
||||
if err != nil {
|
||||
return errors.E(op, errors.Unknown, err)
|
||||
}
|
||||
@ -1313,7 +1314,7 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
|
||||
}
|
||||
switch n {
|
||||
case 1:
|
||||
if err := r.client.SRem(base.AllQueues, qname).Err(); err != nil {
|
||||
if err := r.client.SRem(ctx, base.AllQueues, qname).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, err)
|
||||
}
|
||||
return nil
|
||||
@ -1334,9 +1335,9 @@ redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
|
||||
return keys`)
|
||||
|
||||
// ListServers returns the list of server info.
|
||||
func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
|
||||
func (r *RDB) ListServers(ctx context.Context) ([]*base.ServerInfo, error) {
|
||||
now := time.Now()
|
||||
res, err := listServerKeysCmd.Run(r.client, []string{base.AllServers}, now.Unix()).Result()
|
||||
res, err := listServerKeysCmd.Run(ctx, r.client, []string{base.AllServers}, now.Unix()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1346,7 +1347,7 @@ func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
|
||||
}
|
||||
var servers []*base.ServerInfo
|
||||
for _, key := range keys {
|
||||
data, err := r.client.Get(key).Result()
|
||||
data, err := r.client.Get(ctx, key).Result()
|
||||
if err != nil {
|
||||
continue // skip bad data
|
||||
}
|
||||
@ -1367,10 +1368,10 @@ redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
|
||||
return keys`)
|
||||
|
||||
// ListWorkers returns the list of worker stats.
|
||||
func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
||||
func (r *RDB) ListWorkers(ctx context.Context) ([]*base.WorkerInfo, error) {
|
||||
var op errors.Op = "rdb.ListWorkers"
|
||||
now := time.Now()
|
||||
res, err := listWorkersCmd.Run(r.client, []string{base.AllWorkers}, now.Unix()).Result()
|
||||
res, err := listWorkersCmd.Run(ctx, r.client, []string{base.AllWorkers}, now.Unix()).Result()
|
||||
if err != nil {
|
||||
return nil, errors.E(op, errors.Unknown, err)
|
||||
}
|
||||
@ -1380,7 +1381,7 @@ func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
||||
}
|
||||
var workers []*base.WorkerInfo
|
||||
for _, key := range keys {
|
||||
data, err := r.client.HVals(key).Result()
|
||||
data, err := r.client.HVals(ctx, key).Result()
|
||||
if err != nil {
|
||||
continue // skip bad data
|
||||
}
|
||||
@ -1403,9 +1404,9 @@ redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
|
||||
return keys`)
|
||||
|
||||
// ListSchedulerEntries returns the list of scheduler entries.
|
||||
func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
|
||||
func (r *RDB) ListSchedulerEntries(ctx context.Context) ([]*base.SchedulerEntry, error) {
|
||||
now := time.Now()
|
||||
res, err := listSchedulerKeysCmd.Run(r.client, []string{base.AllSchedulers}, now.Unix()).Result()
|
||||
res, err := listSchedulerKeysCmd.Run(ctx, r.client, []string{base.AllSchedulers}, now.Unix()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1415,7 +1416,7 @@ func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
|
||||
}
|
||||
var entries []*base.SchedulerEntry
|
||||
for _, key := range keys {
|
||||
data, err := r.client.LRange(key, 0, -1).Result()
|
||||
data, err := r.client.LRange(ctx, key, 0, -1).Result()
|
||||
if err != nil {
|
||||
continue // skip bad data
|
||||
}
|
||||
@ -1431,9 +1432,9 @@ func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
|
||||
}
|
||||
|
||||
// ListSchedulerEnqueueEvents returns the list of scheduler enqueue events.
|
||||
func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*base.SchedulerEnqueueEvent, error) {
|
||||
func (r *RDB) ListSchedulerEnqueueEvents(ctx context.Context, entryID string, pgn Pagination) ([]*base.SchedulerEnqueueEvent, error) {
|
||||
key := base.SchedulerHistoryKey(entryID)
|
||||
zs, err := r.client.ZRevRangeWithScores(key, pgn.start(), pgn.stop()).Result()
|
||||
zs, err := r.client.ZRevRangeWithScores(ctx, key, pgn.start(), pgn.stop()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
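For reference, the paginated ZREVRANGE read above maps onto the go-redis v8 API as in this minimal, self-contained sketch. It is illustrative only; the address, key name and page bounds are placeholders, not values taken from this commit.

// Illustrative sketch (not part of this commit): read one page of a sorted
// set, newest first, with go-redis v8, where every call takes a context.
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	// Page 0 with size 20, analogous to Pagination{Size: 20, Page: 0}.
	zs, err := rdb.ZRevRangeWithScores(ctx, "example:history", 0, 19).Result()
	if err != nil {
		panic(err)
	}
	for _, z := range zs {
		fmt.Println(z.Member, z.Score)
	}
}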
@ -1453,9 +1454,9 @@ func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*bas
}

// Pause pauses processing of tasks from the given queue.
func (r *RDB) Pause(qname string) error {
func (r *RDB) Pause(ctx context.Context, qname string) error {
key := base.PausedKey(qname)
ok, err := r.client.SetNX(key, time.Now().Unix(), 0).Result()
ok, err := r.client.SetNX(ctx, key, time.Now().Unix(), 0).Result()
if err != nil {
return err
}
@ -1466,9 +1467,9 @@ func (r *RDB) Pause(qname string) error {
}

// Unpause resumes processing of tasks from the given queue.
func (r *RDB) Unpause(qname string) error {
func (r *RDB) Unpause(ctx context.Context, qname string) error {
key := base.PausedKey(qname)
deleted, err := r.client.Del(key).Result()
deleted, err := r.client.Del(ctx, key).Result()
if err != nil {
return err
}
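The pause/unpause pair above reduces to a SetNX/Del pair on a flag key. A minimal go-redis v8 sketch of that primitive follows; the key name and address are placeholders, not the key layout used by this project.

// Illustrative sketch (not part of this commit): a paused flag via SetNX/Del.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	// Pause: set the flag only if it does not exist yet (no expiration).
	ok, err := rdb.SetNX(ctx, "example:paused", time.Now().Unix(), 0).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("newly paused:", ok) // false means it was already paused
	// Unpause: Del reports how many keys were actually removed.
	deleted, err := rdb.Del(ctx, "example:paused").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("flags removed:", deleted) // 0 means it was not paused
}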
@ -1479,18 +1480,18 @@ func (r *RDB) Unpause(qname string) error {
}

// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
func (r *RDB) ClusterKeySlot(qname string) (int64, error) {
func (r *RDB) ClusterKeySlot(ctx context.Context, qname string) (int64, error) {
key := base.PendingKey(qname)
return r.client.ClusterKeySlot(key).Result()
return r.client.ClusterKeySlot(ctx, key).Result()
}

// ClusterNodes returns a list of nodes the given queue belongs to.
func (r *RDB) ClusterNodes(qname string) ([]redis.ClusterNode, error) {
keyslot, err := r.ClusterKeySlot(qname)
func (r *RDB) ClusterNodes(ctx context.Context, qname string) ([]redis.ClusterNode, error) {
keyslot, err := r.ClusterKeySlot(ctx, qname)
if err != nil {
return nil, err
}
clusterSlots, err := r.client.ClusterSlots().Result()
clusterSlots, err := r.client.ClusterSlots(ctx).Result()
if err != nil {
return nil, err
}
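A hedged sketch of the same two cluster queries against a go-redis v8 cluster client; the addresses and the hash-tagged key are placeholders and do not come from this commit.

// Illustrative sketch (not part of this commit): hash slot and slot-to-node
// lookup with go-redis v8.
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"localhost:7000", "localhost:7001", "localhost:7002"},
	})
	slot, err := rdb.ClusterKeySlot(ctx, "example:{myqueue}:pending").Result()
	if err != nil {
		panic(err)
	}
	slots, err := rdb.ClusterSlots(ctx).Result()
	if err != nil {
		panic(err)
	}
	for _, s := range slots {
		if int64(s.Start) <= slot && slot <= int64(s.End) {
			fmt.Println("slot", slot, "is served by", s.Nodes)
		}
	}
}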
@ -34,11 +34,11 @@ func TestAllQueues(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client)
|
||||
for _, qname := range tc.queues {
|
||||
if err := r.client.SAdd(base.AllQueues, qname).Err(); err != nil {
|
||||
if err := r.client.SAdd(ctx, base.AllQueues, qname).Err(); err != nil {
|
||||
t.Fatalf("could not initialize all queue set: %v", err)
|
||||
}
|
||||
}
|
||||
got, err := r.AllQueues()
|
||||
got, err := r.AllQueues(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("AllQueues() returned an error: %v", err)
|
||||
continue
|
||||
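The test hunks in this section start passing a ctx value into the RDB methods. The test files presumably declare a shared background context at package level, along these lines (not shown in this excerpt; the variable name is an assumption):

import "context"

var ctx = context.Background()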
@ -187,7 +187,7 @@ func TestCurrentStats(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
for _, qname := range tc.paused {
|
||||
if err := r.Pause(qname); err != nil {
|
||||
if err := r.Pause(ctx, qname); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@ -198,14 +198,14 @@ func TestCurrentStats(t *testing.T) {
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
for qname, n := range tc.processed {
|
||||
processedKey := base.ProcessedKey(qname, now)
|
||||
r.client.Set(processedKey, n, 0)
|
||||
r.client.Set(ctx, processedKey, n, 0)
|
||||
}
|
||||
for qname, n := range tc.failed {
|
||||
failedKey := base.FailedKey(qname, now)
|
||||
r.client.Set(failedKey, n, 0)
|
||||
r.client.Set(ctx, failedKey, n, 0)
|
||||
}
|
||||
|
||||
got, err := r.CurrentStats(tc.qname)
|
||||
got, err := r.CurrentStats(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("r.CurrentStats(%q) = %v, %v, want %v, nil", tc.qname, got, err, tc.want)
|
||||
continue
|
||||
@ -224,7 +224,7 @@ func TestCurrentStatsWithNonExistentQueue(t *testing.T) {
|
||||
defer r.Close()
|
||||
|
||||
qname := "non-existent"
|
||||
got, err := r.CurrentStats(qname)
|
||||
got, err := r.CurrentStats(ctx, qname)
|
||||
if !errors.IsQueueNotFound(err) {
|
||||
t.Fatalf("r.CurrentStats(%q) = %v, %v, want nil, %v", qname, got, err, &errors.QueueNotFoundError{Queue: qname})
|
||||
}
|
||||
@ -247,17 +247,17 @@ func TestHistoricalStats(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client)
|
||||
|
||||
r.client.SAdd(base.AllQueues, tc.qname)
|
||||
r.client.SAdd(ctx, base.AllQueues, tc.qname)
|
||||
// populate last n days data
|
||||
for i := 0; i < tc.n; i++ {
|
||||
ts := now.Add(-time.Duration(i) * 24 * time.Hour)
|
||||
processedKey := base.ProcessedKey(tc.qname, ts)
|
||||
failedKey := base.FailedKey(tc.qname, ts)
|
||||
r.client.Set(processedKey, (i+1)*1000, 0)
|
||||
r.client.Set(failedKey, (i+1)*10, 0)
|
||||
r.client.Set(ctx, processedKey, (i+1)*1000, 0)
|
||||
r.client.Set(ctx, failedKey, (i+1)*10, 0)
|
||||
}
|
||||
|
||||
got, err := r.HistoricalStats(tc.qname, tc.n)
|
||||
got, err := r.HistoricalStats(ctx, tc.qname, tc.n)
|
||||
if err != nil {
|
||||
t.Errorf("RDB.HistoricalStats(%q, %d) returned error: %v", tc.qname, tc.n, err)
|
||||
continue
|
||||
@ -289,7 +289,7 @@ func TestRedisInfo(t *testing.T) {
|
||||
r := setup(t)
|
||||
defer r.Close()
|
||||
|
||||
info, err := r.RedisInfo()
|
||||
info, err := r.RedisInfo(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("RDB.RedisInfo() returned error: %v", err)
|
||||
}
|
||||
@ -413,7 +413,7 @@ func TestGetTaskInfo(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
got, err := r.GetTaskInfo(tc.qname, tc.id)
|
||||
got, err := r.GetTaskInfo(ctx, tc.qname, tc.id)
|
||||
if err != nil {
|
||||
t.Errorf("GetTaskInfo(%q, %v) returned error: %v", tc.qname, tc.id, err)
|
||||
continue
|
||||
@ -493,7 +493,7 @@ func TestGetTaskInfoError(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
info, err := r.GetTaskInfo(tc.qname, tc.id)
|
||||
info, err := r.GetTaskInfo(ctx, tc.qname, tc.id)
|
||||
if info != nil {
|
||||
t.Errorf("GetTaskInfo(%q, %v) returned info: %v", tc.qname, tc.id, info)
|
||||
}
|
||||
@ -555,7 +555,7 @@ func TestListPending(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||
|
||||
got, err := r.ListPending(tc.qname, Pagination{Size: 20, Page: 0})
|
||||
got, err := r.ListPending(ctx, tc.qname, Pagination{Size: 20, Page: 0})
|
||||
op := fmt.Sprintf("r.ListPending(%q, Pagination{Size: 20, Page: 0})", tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want)
|
||||
@ -605,7 +605,7 @@ func TestListPendingPagination(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
got, err := r.ListPending(tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
got, err := r.ListPending(ctx, tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
op := fmt.Sprintf("r.ListPending(%q, Pagination{Size: %d, Page: %d})", tc.qname, tc.size, tc.page)
|
||||
if err != nil {
|
||||
t.Errorf("%s; %s returned error %v", tc.desc, op, err)
|
||||
@ -671,7 +671,7 @@ func TestListActive(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllActiveQueues(t, r.client, tc.inProgress)
|
||||
|
||||
got, err := r.ListActive(tc.qname, Pagination{Size: 20, Page: 0})
|
||||
got, err := r.ListActive(ctx, tc.qname, Pagination{Size: 20, Page: 0})
|
||||
op := fmt.Sprintf("r.ListActive(%q, Pagination{Size: 20, Page: 0})", tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.inProgress)
|
||||
@ -711,7 +711,7 @@ func TestListActivePagination(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
got, err := r.ListActive(tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
got, err := r.ListActive(ctx, tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
op := fmt.Sprintf("r.ListActive(%q, Pagination{Size: %d, Page: %d})", tc.qname, tc.size, tc.page)
|
||||
if err != nil {
|
||||
t.Errorf("%s; %s returned error %v", tc.desc, op, err)
|
||||
@ -806,7 +806,7 @@ func TestListScheduled(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
|
||||
got, err := r.ListScheduled(tc.qname, Pagination{Size: 20, Page: 0})
|
||||
got, err := r.ListScheduled(ctx, tc.qname, Pagination{Size: 20, Page: 0})
|
||||
op := fmt.Sprintf("r.ListScheduled(%q, Pagination{Size: 20, Page: 0})", tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want)
|
||||
@ -825,7 +825,7 @@ func TestListScheduledPagination(t *testing.T) {
|
||||
// create 100 tasks with an increasing number of wait time.
|
||||
for i := 0; i < 100; i++ {
|
||||
msg := h.NewTaskMessage(fmt.Sprintf("task %d", i), nil)
|
||||
if err := r.Schedule(msg, time.Now().Add(time.Duration(i)*time.Second)); err != nil {
|
||||
if err := r.Schedule(ctx, msg, time.Now().Add(time.Duration(i)*time.Second)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@ -847,7 +847,7 @@ func TestListScheduledPagination(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
got, err := r.ListScheduled(tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
got, err := r.ListScheduled(ctx, tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
op := fmt.Sprintf("r.ListScheduled(%q, Pagination{Size: %d, Page: %d})", tc.qname, tc.size, tc.page)
|
||||
if err != nil {
|
||||
t.Errorf("%s; %s returned error %v", tc.desc, op, err)
|
||||
@ -960,7 +960,7 @@ func TestListRetry(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
|
||||
got, err := r.ListRetry(tc.qname, Pagination{Size: 20, Page: 0})
|
||||
got, err := r.ListRetry(ctx, tc.qname, Pagination{Size: 20, Page: 0})
|
||||
op := fmt.Sprintf("r.ListRetry(%q, Pagination{Size: 20, Page: 0})", tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want)
|
||||
@ -1004,7 +1004,7 @@ func TestListRetryPagination(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
got, err := r.ListRetry(tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
got, err := r.ListRetry(ctx, tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
op := fmt.Sprintf("r.ListRetry(%q, Pagination{Size: %d, Page: %d})",
|
||||
tc.qname, tc.size, tc.page)
|
||||
if err != nil {
|
||||
@ -1113,7 +1113,7 @@ func TestListArchived(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
got, err := r.ListArchived(tc.qname, Pagination{Size: 20, Page: 0})
|
||||
got, err := r.ListArchived(ctx, tc.qname, Pagination{Size: 20, Page: 0})
|
||||
op := fmt.Sprintf("r.ListDead(%q, Pagination{Size: 20, Page: 0})", tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("%s = %v, %v, want %v, nil", op, got, err, tc.want)
|
||||
@ -1154,7 +1154,7 @@ func TestListArchivedPagination(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
got, err := r.ListArchived(tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
got, err := r.ListArchived(ctx, tc.qname, Pagination{Size: tc.size, Page: tc.page})
|
||||
op := fmt.Sprintf("r.ListDead(Pagination{Size: %d, Page: %d})",
|
||||
tc.size, tc.page)
|
||||
if err != nil {
|
||||
@ -1204,19 +1204,19 @@ func TestListTasksError(t *testing.T) {
|
||||
|
||||
for _, tc := range tests {
|
||||
pgn := Pagination{Page: 0, Size: 20}
|
||||
if _, got := r.ListActive(tc.qname, pgn); !tc.match(got) {
|
||||
if _, got := r.ListActive(ctx, tc.qname, pgn); !tc.match(got) {
|
||||
t.Errorf("%s: ListActive returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.ListPending(tc.qname, pgn); !tc.match(got) {
|
||||
if _, got := r.ListPending(ctx, tc.qname, pgn); !tc.match(got) {
|
||||
t.Errorf("%s: ListPending returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.ListScheduled(tc.qname, pgn); !tc.match(got) {
|
||||
if _, got := r.ListScheduled(ctx, tc.qname, pgn); !tc.match(got) {
|
||||
t.Errorf("%s: ListScheduled returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.ListRetry(tc.qname, pgn); !tc.match(got) {
|
||||
if _, got := r.ListRetry(ctx, tc.qname, pgn); !tc.match(got) {
|
||||
t.Errorf("%s: ListRetry returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.ListArchived(tc.qname, pgn); !tc.match(got) {
|
||||
if _, got := r.ListArchived(ctx, tc.qname, pgn); !tc.match(got) {
|
||||
t.Errorf("%s: ListArchived returned %v", tc.desc, got)
|
||||
}
|
||||
}
|
||||
@ -1286,7 +1286,7 @@ func TestRunArchivedTask(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
if got := r.RunTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.RunTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("r.RunTask(%q, %s) returned error: %v", tc.qname, tc.id, got)
|
||||
continue
|
||||
}
|
||||
@ -1366,7 +1366,7 @@ func TestRunRetryTask(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry) // initialize retry queue
|
||||
|
||||
if got := r.RunTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.RunTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("r.RunTask(%q, %s) returned error: %v", tc.qname, tc.id, got)
|
||||
continue
|
||||
}
|
||||
@ -1446,7 +1446,7 @@ func TestRunScheduledTask(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
|
||||
if got := r.RunTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.RunTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("r.RunTask(%q, %s) returned error: %v", tc.qname, tc.id, got)
|
||||
continue
|
||||
}
|
||||
@ -1593,7 +1593,7 @@ func TestRunTaskError(t *testing.T) {
|
||||
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
|
||||
got := r.RunTask(tc.qname, tc.id)
|
||||
got := r.RunTask(ctx, tc.qname, tc.id)
|
||||
if !tc.match(got) {
|
||||
t.Errorf("%s: unexpected return value %v", tc.desc, got)
|
||||
continue
|
||||
@ -1702,7 +1702,7 @@ func TestRunAllScheduledTasks(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
|
||||
got, err := r.RunAllScheduledTasks(tc.qname)
|
||||
got, err := r.RunAllScheduledTasks(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("%s; r.RunAllScheduledTasks(%q) = %v, %v; want %v, nil",
|
||||
tc.desc, tc.qname, got, err, tc.want)
|
||||
@ -1808,7 +1808,7 @@ func TestRunAllRetryTasks(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
|
||||
got, err := r.RunAllRetryTasks(tc.qname)
|
||||
got, err := r.RunAllRetryTasks(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("%s; r.RunAllRetryTasks(%q) = %v, %v; want %v, nil",
|
||||
tc.desc, tc.qname, got, err, tc.want)
|
||||
@ -1914,7 +1914,7 @@ func TestRunAllArchivedTasks(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
got, err := r.RunAllArchivedTasks(tc.qname)
|
||||
got, err := r.RunAllArchivedTasks(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("%s; r.RunAllDeadTasks(%q) = %v, %v; want %v, nil",
|
||||
tc.desc, tc.qname, got, err, tc.want)
|
||||
@ -1958,13 +1958,13 @@ func TestRunAllTasksError(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
if _, got := r.RunAllScheduledTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.RunAllScheduledTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: RunAllScheduledTasks returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.RunAllRetryTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.RunAllRetryTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: RunAllRetryTasks returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.RunAllArchivedTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.RunAllArchivedTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: RunAllArchivedTasks returned %v", tc.desc, got)
|
||||
}
|
||||
}
|
||||
@ -2047,7 +2047,7 @@ func TestArchiveRetryTask(t *testing.T) {
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
if got := r.ArchiveTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.ArchiveTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("(*RDB).ArchiveTask(%q, %v) returned error: %v",
|
||||
tc.qname, tc.id, got)
|
||||
continue
|
||||
@ -2148,7 +2148,7 @@ func TestArchiveScheduledTask(t *testing.T) {
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
if got := r.ArchiveTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.ArchiveTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("(*RDB).ArchiveTask(%q, %v) returned error: %v",
|
||||
tc.qname, tc.id, got)
|
||||
continue
|
||||
@ -2231,7 +2231,7 @@ func TestArchivePendingTask(t *testing.T) {
|
||||
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
if got := r.ArchiveTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.ArchiveTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("(*RDB).ArchiveTask(%q, %v) returned error: %v",
|
||||
tc.qname, tc.id, got)
|
||||
continue
|
||||
@ -2379,7 +2379,7 @@ func TestArchiveTaskError(t *testing.T) {
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
got := r.ArchiveTask(tc.qname, tc.id)
|
||||
got := r.ArchiveTask(ctx, tc.qname, tc.id)
|
||||
if !tc.match(got) {
|
||||
t.Errorf("%s: returned error didn't match: got=%v", tc.desc, got)
|
||||
continue
|
||||
@ -2518,7 +2518,7 @@ func TestArchiveAllPendingTasks(t *testing.T) {
|
||||
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
got, err := r.ArchiveAllPendingTasks(tc.qname)
|
||||
got, err := r.ArchiveAllPendingTasks(ctx, tc.qname)
|
||||
if got != tc.want || err != nil {
|
||||
t.Errorf("(*RDB).KillAllRetryTasks(%q) = %v, %v; want %v, nil",
|
||||
tc.qname, got, err, tc.want)
|
||||
@ -2664,7 +2664,7 @@ func TestArchiveAllRetryTasks(t *testing.T) {
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
got, err := r.ArchiveAllRetryTasks(tc.qname)
|
||||
got, err := r.ArchiveAllRetryTasks(ctx, tc.qname)
|
||||
if got != tc.want || err != nil {
|
||||
t.Errorf("(*RDB).KillAllRetryTasks(%q) = %v, %v; want %v, nil",
|
||||
tc.qname, got, err, tc.want)
|
||||
@ -2811,7 +2811,7 @@ func TestArchiveAllScheduledTasks(t *testing.T) {
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
got, err := r.ArchiveAllScheduledTasks(tc.qname)
|
||||
got, err := r.ArchiveAllScheduledTasks(ctx, tc.qname)
|
||||
if got != tc.want || err != nil {
|
||||
t.Errorf("(*RDB).KillAllScheduledTasks(%q) = %v, %v; want %v, nil",
|
||||
tc.qname, got, err, tc.want)
|
||||
@ -2853,13 +2853,13 @@ func TestArchiveAllTasksError(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
if _, got := r.ArchiveAllPendingTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.ArchiveAllPendingTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: ArchiveAllPendingTasks returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.ArchiveAllScheduledTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.ArchiveAllScheduledTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: ArchiveAllScheduledTasks returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.ArchiveAllRetryTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.ArchiveAllRetryTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: ArchiveAllRetryTasks returned %v", tc.desc, got)
|
||||
}
|
||||
}
|
||||
@ -2917,7 +2917,7 @@ func TestDeleteArchivedTask(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
if got := r.DeleteTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.DeleteTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("r.DeleteTask(%q, %v) returned error: %v", tc.qname, tc.id, got)
|
||||
continue
|
||||
}
|
||||
@ -2983,7 +2983,7 @@ func TestDeleteRetryTask(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
|
||||
if got := r.DeleteTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.DeleteTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("r.DeleteTask(%q, %v) returned error: %v", tc.qname, tc.id, got)
|
||||
continue
|
||||
}
|
||||
@ -3049,7 +3049,7 @@ func TestDeleteScheduledTask(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
|
||||
if got := r.DeleteTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.DeleteTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("r.DeleteTask(%q, %v) returned error: %v", tc.qname, tc.id, got)
|
||||
continue
|
||||
}
|
||||
@ -3104,7 +3104,7 @@ func TestDeletePendingTask(t *testing.T) {
|
||||
h.FlushDB(t, r.client)
|
||||
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||
|
||||
if got := r.DeleteTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.DeleteTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("r.DeleteTask(%q, %v) returned error: %v", tc.qname, tc.id, got)
|
||||
continue
|
||||
}
|
||||
@ -3156,7 +3156,7 @@ func TestDeleteTaskWithUniqueLock(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
|
||||
if got := r.DeleteTask(tc.qname, tc.id); got != nil {
|
||||
if got := r.DeleteTask(ctx, tc.qname, tc.id); got != nil {
|
||||
t.Errorf("r.DeleteTask(%q, %v) returned error: %v", tc.qname, tc.id, got)
|
||||
continue
|
||||
}
|
||||
@ -3168,7 +3168,7 @@ func TestDeleteTaskWithUniqueLock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
if r.client.Exists(tc.uniqueKey).Val() != 0 {
|
||||
if r.client.Exists(ctx, tc.uniqueKey).Val() != 0 {
|
||||
t.Errorf("Uniqueness lock %q still exists", tc.uniqueKey)
|
||||
}
|
||||
}
|
||||
@ -3251,7 +3251,7 @@ func TestDeleteTaskError(t *testing.T) {
|
||||
h.SeedAllActiveQueues(t, r.client, tc.active)
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
|
||||
got := r.DeleteTask(tc.qname, tc.id)
|
||||
got := r.DeleteTask(ctx, tc.qname, tc.id)
|
||||
if !tc.match(got) {
|
||||
t.Errorf("%s: r.DeleteTask(qname, id) returned %v", tc.desc, got)
|
||||
continue
|
||||
@ -3319,7 +3319,7 @@ func TestDeleteAllArchivedTasks(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
got, err := r.DeleteAllArchivedTasks(tc.qname)
|
||||
got, err := r.DeleteAllArchivedTasks(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("r.DeleteAllDeadTasks(%q) returned error: %v", tc.qname, err)
|
||||
}
|
||||
@ -3386,7 +3386,7 @@ func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
got, err := r.DeleteAllArchivedTasks(tc.qname)
|
||||
got, err := r.DeleteAllArchivedTasks(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("r.DeleteAllDeadTasks(%q) returned error: %v", tc.qname, err)
|
||||
}
|
||||
@ -3401,7 +3401,7 @@ func TestDeleteAllArchivedTasksWithUniqueKey(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, uniqueKey := range tc.uniqueKeys {
|
||||
if r.client.Exists(uniqueKey).Val() != 0 {
|
||||
if r.client.Exists(ctx, uniqueKey).Val() != 0 {
|
||||
t.Errorf("Uniqueness lock %q still exists", uniqueKey)
|
||||
}
|
||||
}
|
||||
@ -3454,7 +3454,7 @@ func TestDeleteAllRetryTasks(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
|
||||
got, err := r.DeleteAllRetryTasks(tc.qname)
|
||||
got, err := r.DeleteAllRetryTasks(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("r.DeleteAllRetryTasks(%q) returned error: %v", tc.qname, err)
|
||||
}
|
||||
@ -3516,7 +3516,7 @@ func TestDeleteAllScheduledTasks(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
|
||||
got, err := r.DeleteAllScheduledTasks(tc.qname)
|
||||
got, err := r.DeleteAllScheduledTasks(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("r.DeleteAllScheduledTasks(%q) returned error: %v", tc.qname, err)
|
||||
}
|
||||
@ -3573,7 +3573,7 @@ func TestDeleteAllPendingTasks(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||
|
||||
got, err := r.DeleteAllPendingTasks(tc.qname)
|
||||
got, err := r.DeleteAllPendingTasks(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("r.DeleteAllPendingTasks(%q) returned error: %v", tc.qname, err)
|
||||
}
|
||||
@ -3606,16 +3606,16 @@ func TestDeleteAllTasksError(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
if _, got := r.DeleteAllPendingTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.DeleteAllPendingTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: DeleteAllPendingTasks returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.DeleteAllScheduledTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.DeleteAllScheduledTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: DeleteAllScheduledTasks returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.DeleteAllRetryTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.DeleteAllRetryTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: DeleteAllRetryTasks returned %v", tc.desc, got)
|
||||
}
|
||||
if _, got := r.DeleteAllArchivedTasks(tc.qname); !tc.match(got) {
|
||||
if _, got := r.DeleteAllArchivedTasks(ctx, tc.qname); !tc.match(got) {
|
||||
t.Errorf("%s: DeleteAllArchivedTasks returned %v", tc.desc, got)
|
||||
}
|
||||
}
|
||||
@ -3696,13 +3696,13 @@ func TestRemoveQueue(t *testing.T) {
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
err := r.RemoveQueue(tc.qname, tc.force)
|
||||
err := r.RemoveQueue(ctx, tc.qname, tc.force)
|
||||
if err != nil {
|
||||
t.Errorf("(*RDB).RemoveQueue(%q, %t) = %v, want nil",
|
||||
tc.qname, tc.force, err)
|
||||
continue
|
||||
}
|
||||
if r.client.SIsMember(base.AllQueues, tc.qname).Val() {
|
||||
if r.client.SIsMember(ctx, base.AllQueues, tc.qname).Val() {
|
||||
t.Errorf("%q is a member of %q", tc.qname, base.AllQueues)
|
||||
}
|
||||
|
||||
@ -3715,12 +3715,12 @@ func TestRemoveQueue(t *testing.T) {
|
||||
base.ArchivedKey(tc.qname),
|
||||
}
|
||||
for _, key := range keys {
|
||||
if r.client.Exists(key).Val() != 0 {
|
||||
if r.client.Exists(ctx, key).Val() != 0 {
|
||||
t.Errorf("key %q still exists", key)
|
||||
}
|
||||
}
|
||||
|
||||
if n := len(r.client.Keys(base.TaskKeyPrefix(tc.qname) + "*").Val()); n != 0 {
|
||||
if n := len(r.client.Keys(ctx, base.TaskKeyPrefix(tc.qname) + "*").Val()); n != 0 {
|
||||
t.Errorf("%d keys still exists for tasks", n)
|
||||
}
|
||||
}
|
||||
@ -3834,7 +3834,7 @@ func TestRemoveQueueError(t *testing.T) {
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
got := r.RemoveQueue(tc.qname, tc.force)
|
||||
got := r.RemoveQueue(ctx, tc.qname, tc.force)
|
||||
if !tc.match(got) {
|
||||
t.Errorf("%s; returned error didn't match expected value; got=%v", tc.desc, got)
|
||||
continue
|
||||
@ -3920,12 +3920,12 @@ func TestListServers(t *testing.T) {
|
||||
h.FlushDB(t, r.client)
|
||||
|
||||
for _, info := range tc.data {
|
||||
if err := r.WriteServerState(info, []*base.WorkerInfo{}, 5*time.Second); err != nil {
|
||||
if err := r.WriteServerState(ctx, info, []*base.WorkerInfo{}, 5*time.Second); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
got, err := r.ListServers()
|
||||
got, err := r.ListServers(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("r.ListServers returned an error: %v", err)
|
||||
}
|
||||
@ -3995,13 +3995,13 @@ func TestListWorkers(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client)
|
||||
|
||||
err := r.WriteServerState(&base.ServerInfo{}, tc.data, time.Minute)
|
||||
err := r.WriteServerState(ctx, &base.ServerInfo{}, tc.data, time.Minute)
|
||||
if err != nil {
|
||||
t.Errorf("could not write server state to redis: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
got, err := r.ListWorkers()
|
||||
got, err := r.ListWorkers(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("(*RDB).ListWorkers() returned an error: %v", err)
|
||||
continue
|
||||
@ -4037,20 +4037,20 @@ func TestWriteListClearSchedulerEntries(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
if err := r.WriteSchedulerEntries(schedulerID, data, 30*time.Second); err != nil {
|
||||
if err := r.WriteSchedulerEntries(ctx, schedulerID, data, 30*time.Second); err != nil {
|
||||
t.Fatalf("WriteSchedulerEnties failed: %v", err)
|
||||
}
|
||||
entries, err := r.ListSchedulerEntries()
|
||||
entries, err := r.ListSchedulerEntries(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("ListSchedulerEntries failed: %v", err)
|
||||
}
|
||||
if diff := cmp.Diff(data, entries, h.SortSchedulerEntryOpt); diff != "" {
|
||||
t.Errorf("ListSchedulerEntries() = %v, want %v; (-want,+got)\n%s", entries, data, diff)
|
||||
}
|
||||
if err := r.ClearSchedulerEntries(schedulerID); err != nil {
|
||||
if err := r.ClearSchedulerEntries(ctx, schedulerID); err != nil {
|
||||
t.Fatalf("ClearSchedulerEntries failed: %v", err)
|
||||
}
|
||||
entries, err = r.ListSchedulerEntries()
|
||||
entries, err = r.ListSchedulerEntries(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("ListSchedulerEntries() after clear failed: %v", err)
|
||||
}
|
||||
@ -4100,12 +4100,12 @@ loop:
|
||||
h.FlushDB(t, r.client)
|
||||
|
||||
for _, e := range tc.events {
|
||||
if err := r.RecordSchedulerEnqueueEvent(tc.entryID, e); err != nil {
|
||||
if err := r.RecordSchedulerEnqueueEvent(ctx, tc.entryID, e); err != nil {
|
||||
t.Errorf("RecordSchedulerEnqueueEvent(%q, %v) failed: %v", tc.entryID, e, err)
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
got, err := r.ListSchedulerEnqueueEvents(tc.entryID, Pagination{Size: 20, Page: 0})
|
||||
got, err := r.ListSchedulerEnqueueEvents(ctx, tc.entryID, Pagination{Size: 20, Page: 0})
|
||||
if err != nil {
|
||||
t.Errorf("ListSchedulerEnqueueEvents(%q) failed: %v", tc.entryID, err)
|
||||
continue
|
||||
@ -4131,13 +4131,13 @@ func TestRecordSchedulerEnqueueEventTrimsDataSet(t *testing.T) {
|
||||
TaskID: fmt.Sprintf("task%d", i),
|
||||
EnqueuedAt: now.Add(-time.Duration(i) * time.Second),
|
||||
}
|
||||
if err := r.RecordSchedulerEnqueueEvent(entryID, &event); err != nil {
|
||||
if err := r.RecordSchedulerEnqueueEvent(ctx, entryID, &event); err != nil {
|
||||
t.Fatalf("RecordSchedulerEnqueueEvent failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure the set is full.
|
||||
if n := r.client.ZCard(key).Val(); n != maxEvents {
|
||||
if n := r.client.ZCard(ctx, key).Val(); n != maxEvents {
|
||||
t.Fatalf("unexpected number of events; got %d, want %d", n, maxEvents)
|
||||
}
|
||||
|
||||
@ -4146,13 +4146,13 @@ func TestRecordSchedulerEnqueueEventTrimsDataSet(t *testing.T) {
|
||||
TaskID: "latest",
|
||||
EnqueuedAt: now,
|
||||
}
|
||||
if err := r.RecordSchedulerEnqueueEvent(entryID, &event); err != nil {
|
||||
if err := r.RecordSchedulerEnqueueEvent(ctx, entryID, &event); err != nil {
|
||||
t.Fatalf("RecordSchedulerEnqueueEvent failed: %v", err)
|
||||
}
|
||||
if n := r.client.ZCard(key).Val(); n != maxEvents {
|
||||
if n := r.client.ZCard(ctx, key).Val(); n != maxEvents {
|
||||
t.Fatalf("unexpected number of events; got %d, want %d", n, maxEvents)
|
||||
}
|
||||
events, err := r.ListSchedulerEnqueueEvents(entryID, Pagination{Size: maxEvents})
|
||||
events, err := r.ListSchedulerEnqueueEvents(ctx, entryID, Pagination{Size: maxEvents})
|
||||
if err != nil {
|
||||
t.Fatalf("ListSchedulerEnqueueEvents failed: %v", err)
|
||||
}
|
||||
@ -4177,12 +4177,12 @@ func TestPause(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client)
|
||||
|
||||
err := r.Pause(tc.qname)
|
||||
err := r.Pause(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("Pause(%q) returned error: %v", tc.qname, err)
|
||||
}
|
||||
key := base.PausedKey(tc.qname)
|
||||
if r.client.Exists(key).Val() == 0 {
|
||||
if r.client.Exists(ctx, key).Val() == 0 {
|
||||
t.Errorf("key %q does not exist", key)
|
||||
}
|
||||
}
|
||||
@ -4202,12 +4202,12 @@ func TestPauseError(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client)
|
||||
for _, qname := range tc.paused {
|
||||
if err := r.Pause(qname); err != nil {
|
||||
if err := r.Pause(ctx, qname); err != nil {
|
||||
t.Fatalf("could not pause %q: %v", qname, err)
|
||||
}
|
||||
}
|
||||
|
||||
err := r.Pause(tc.qname)
|
||||
err := r.Pause(ctx, tc.qname)
|
||||
if err == nil {
|
||||
t.Errorf("%s; Pause(%q) returned nil: want error", tc.desc, tc.qname)
|
||||
}
|
||||
@ -4227,17 +4227,17 @@ func TestUnpause(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client)
|
||||
for _, qname := range tc.paused {
|
||||
if err := r.Pause(qname); err != nil {
|
||||
if err := r.Pause(ctx, qname); err != nil {
|
||||
t.Fatalf("could not pause %q: %v", qname, err)
|
||||
}
|
||||
}
|
||||
|
||||
err := r.Unpause(tc.qname)
|
||||
err := r.Unpause(ctx, tc.qname)
|
||||
if err != nil {
|
||||
t.Errorf("Unpause(%q) returned error: %v", tc.qname, err)
|
||||
}
|
||||
key := base.PausedKey(tc.qname)
|
||||
if r.client.Exists(key).Val() == 1 {
|
||||
if r.client.Exists(ctx, key).Val() == 1 {
|
||||
t.Errorf("key %q exists", key)
|
||||
}
|
||||
}
|
||||
@ -4257,12 +4257,12 @@ func TestUnpauseError(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client)
|
||||
for _, qname := range tc.paused {
|
||||
if err := r.Pause(qname); err != nil {
|
||||
if err := r.Pause(ctx, qname); err != nil {
|
||||
t.Fatalf("could not pause %q: %v", qname, err)
|
||||
}
|
||||
}
|
||||
|
||||
err := r.Unpause(tc.qname)
|
||||
err := r.Unpause(ctx, tc.qname)
|
||||
if err == nil {
|
||||
t.Errorf("%s; Unpause(%q) returned nil: want error", tc.desc, tc.qname)
|
||||
}
|
@ -6,10 +6,11 @@
package rdb

import (
"context"
"fmt"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/spf13/cast"
@ -38,12 +39,12 @@ func (r *RDB) Client() redis.UniversalClient {
}

// Ping checks the connection with redis server.
func (r *RDB) Ping() error {
return r.client.Ping().Err()
func (r *RDB) Ping(ctx context.Context) error {
return r.client.Ping(ctx).Err()
}

func (r *RDB) runScript(op errors.Op, script *redis.Script, keys []string, args ...interface{}) error {
if err := script.Run(r.client, keys, args...).Err(); err != nil {
func (r *RDB) runScript(ctx context.Context, op errors.Op, script *redis.Script, keys []string, args ...interface{}) error {
if err := script.Run(ctx, r.client, keys, args...).Err(); err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err))
}
return nil
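The core of this migration is visible right here: in go-redis v8 both plain commands and Lua script execution take a context.Context as their first argument, which is what the Ping and runScript changes reflect. A minimal, self-contained sketch; the script body, key and address are placeholders, not code from this commit.

// Illustrative sketch (not part of this commit): go-redis v8 command and
// script calls under a deadline-bounded context.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

var incrByCmd = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	if err := rdb.Ping(ctx).Err(); err != nil {
		panic(err)
	}
	// Script.Run now takes the context first, then the client, keys, and args.
	n, err := incrByCmd.Run(ctx, rdb, []string{"example:counter"}, 42).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(n)
}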
@ -73,13 +74,13 @@ return 1
`)

// Enqueue adds the given task to the pending list of the queue.
func (r *RDB) Enqueue(msg *base.TaskMessage) error {
func (r *RDB) Enqueue(ctx context.Context, msg *base.TaskMessage) error {
var op errors.Op = "rdb.Enqueue"
encoded, err := base.EncodeMessage(msg)
if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
}
keys := []string{
@ -92,7 +93,7 @@ func (r *RDB) Enqueue(msg *base.TaskMessage) error {
msg.Timeout,
msg.Deadline,
}
return r.runScript(op, enqueueCmd, keys, argv...)
return r.runScript(ctx, op, enqueueCmd, keys, argv...)
}
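The SADD registration step above now needs a context as well. A minimal caller-side sketch with a timeout-bounded context; the set key and queue name are placeholders rather than the project's actual key layout.

// Illustrative sketch (not part of this commit): registering a queue name in
// a set with go-redis v8; SADD is a no-op if the member already exists.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	if err := rdb.SAdd(ctx, "example:queues", "default").Err(); err != nil {
		panic(err)
	}
	fmt.Println("queue registered")
}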
// enqueueUniqueCmd enqueues the task message if the task is unique.
@ -127,13 +128,13 @@ return 1

// EnqueueUnique inserts the given task if the task's uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired.
func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
func (r *RDB) EnqueueUnique(ctx context.Context, msg *base.TaskMessage, ttl time.Duration) error {
var op errors.Op = "rdb.EnqueueUnique"
encoded, err := base.EncodeMessage(msg)
if err != nil {
return errors.E(op, errors.Internal, "cannot encode task message: %v", err)
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
}
keys := []string{
@ -148,7 +149,7 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
msg.Timeout,
msg.Deadline,
}
res, err := enqueueUniqueCmd.Run(r.client, keys, argv...).Result()
res, err := enqueueUniqueCmd.Run(ctx, r.client, keys, argv...).Result()
if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
}
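The uniqueness lock that EnqueueUnique relies on is, at bottom, a SET-if-not-exists with a TTL. A hedged go-redis v8 sketch of that primitive; the key and TTL below are placeholders, not the commit's actual uniqueness-key scheme.

// Illustrative sketch (not part of this commit): acquiring a unique lock with
// SetNX plus an expiration, the building block behind duplicate detection.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	acquired, err := rdb.SetNX(ctx, "example:unique:task123", "task-id", time.Hour).Result()
	if err != nil {
		panic(err)
	}
	if !acquired {
		fmt.Println("duplicate task: the lock is still held")
		return
	}
	fmt.Println("lock acquired; safe to enqueue")
}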
||||
@ -210,7 +211,7 @@ return nil`)
|
||||
// off a queue if one exists and returns the message and deadline.
|
||||
// Dequeue skips a queue if the queue is paused.
|
||||
// If all queues are empty, ErrNoProcessableTask error is returned.
|
||||
func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Time, err error) {
|
||||
func (r *RDB) Dequeue(ctx context.Context, qnames ...string) (msg *base.TaskMessage, deadline time.Time, err error) {
|
||||
var op errors.Op = "rdb.Dequeue"
|
||||
for _, qname := range qnames {
|
||||
keys := []string{
|
||||
@ -223,7 +224,7 @@ func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Ti
|
||||
time.Now().Unix(),
|
||||
base.TaskKeyPrefix(qname),
|
||||
}
|
||||
res, err := dequeueCmd.Run(r.client, keys, argv...).Result()
|
||||
res, err := dequeueCmd.Run(ctx, r.client, keys, argv...).Result()
|
||||
if err == redis.Nil {
|
||||
continue
|
||||
} else if err != nil {
|
||||
@ -304,7 +305,7 @@ return redis.status_reply("OK")
|
||||
|
||||
// Done removes the task from active queue to mark the task as done.
|
||||
// It removes a uniqueness lock acquired by the task, if any.
|
||||
func (r *RDB) Done(msg *base.TaskMessage) error {
|
||||
func (r *RDB) Done(ctx context.Context, msg *base.TaskMessage) error {
|
||||
var op errors.Op = "rdb.Done"
|
||||
now := time.Now()
|
||||
expireAt := now.Add(statsTTL)
|
||||
@ -320,9 +321,9 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
|
||||
}
|
||||
if len(msg.UniqueKey) > 0 {
|
||||
keys = append(keys, msg.UniqueKey)
|
||||
return r.runScript(op, doneUniqueCmd, keys, argv...)
|
||||
return r.runScript(ctx, op, doneUniqueCmd, keys, argv...)
|
||||
}
|
||||
return r.runScript(op, doneCmd, keys, argv...)
|
||||
return r.runScript(ctx, op, doneCmd, keys, argv...)
|
||||
}
|
||||
|
||||
// KEYS[1] -> asynq:{<qname>}:active
|
||||
@ -343,7 +344,7 @@ redis.call("HSET", KEYS[4], "state", "pending")
|
||||
return redis.status_reply("OK")`)
|
||||
|
||||
// Requeue moves the task from active queue to the specified queue.
|
||||
func (r *RDB) Requeue(msg *base.TaskMessage) error {
|
||||
func (r *RDB) Requeue(ctx context.Context, msg *base.TaskMessage) error {
|
||||
var op errors.Op = "rdb.Requeue"
|
||||
keys := []string{
|
||||
base.ActiveKey(msg.Queue),
|
||||
@ -351,7 +352,7 @@ func (r *RDB) Requeue(msg *base.TaskMessage) error {
|
||||
base.PendingKey(msg.Queue),
|
||||
base.TaskKey(msg.Queue, msg.ID.String()),
|
||||
}
|
||||
return r.runScript(op, requeueCmd, keys, msg.ID.String())
|
||||
return r.runScript(ctx, op, requeueCmd, keys, msg.ID.String())
|
||||
}
|
||||
|
||||
// KEYS[1] -> asynq:{<qname>}:t:<task_id>
|
||||
@ -372,13 +373,13 @@ return 1
|
||||
`)
|
||||
|
||||
// Schedule adds the task to the scheduled set to be processed in the future.
|
||||
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
|
||||
func (r *RDB) Schedule(ctx context.Context, msg *base.TaskMessage, processAt time.Time) error {
|
||||
var op errors.Op = "rdb.Schedule"
|
||||
encoded, err := base.EncodeMessage(msg)
|
||||
if err != nil {
|
||||
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
|
||||
}
|
||||
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
||||
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
|
||||
}
|
||||
keys := []string{
|
||||
@ -392,7 +393,7 @@ func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
|
||||
msg.Timeout,
|
||||
msg.Deadline,
|
||||
}
|
||||
return r.runScript(op, scheduleCmd, keys, argv...)
|
||||
return r.runScript(ctx, op, scheduleCmd, keys, argv...)
|
||||
}
|
||||
|
||||
// KEYS[1] -> unique key
|
||||
@ -421,13 +422,13 @@ return 1
|
||||
|
||||
// ScheduleUnique adds the task to the backlog queue to be processed in the future if the uniqueness lock can be acquired.
|
||||
// It returns ErrDuplicateTask if the lock cannot be acquired.
|
||||
func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
|
||||
func (r *RDB) ScheduleUnique(ctx context.Context, msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
|
||||
var op errors.Op = "rdb.ScheduleUnique"
|
||||
encoded, err := base.EncodeMessage(msg)
|
||||
if err != nil {
|
||||
return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode task message: %v", err))
|
||||
}
|
||||
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
||||
if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
|
||||
}
|
||||
keys := []string{
|
||||
@ -443,7 +444,7 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
|
||||
msg.Timeout,
|
||||
msg.Deadline,
|
||||
}
|
||||
res, err := scheduleUniqueCmd.Run(r.client, keys, argv...).Result()
|
||||
res, err := scheduleUniqueCmd.Run(ctx, r.client, keys, argv...).Result()
|
||||
if err != nil {
|
||||
return errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
|
||||
}
|
||||
@ -488,7 +489,7 @@ return redis.status_reply("OK")`)
|
||||
|
||||
// Retry moves the task from active to retry queue, incrementing retry count
|
||||
// and assigning error message to the task message.
|
||||
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
|
||||
func (r *RDB) Retry(ctx context.Context, msg *base.TaskMessage, processAt time.Time, errMsg string) error {
|
||||
var op errors.Op = "rdb.Retry"
|
||||
now := time.Now()
|
||||
modified := *msg
|
||||
@ -514,7 +515,7 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) e
|
||||
processAt.Unix(),
|
||||
expireAt.Unix(),
|
||||
}
|
||||
return r.runScript(op, retryCmd, keys, argv...)
|
||||
return r.runScript(ctx, op, retryCmd, keys, argv...)
|
||||
}
|
||||
|
||||
const (
|
||||
@ -557,7 +558,7 @@ return redis.status_reply("OK")`)
|
||||
|
||||
// Archive sends the given task to archive, attaching the error message to the task.
|
||||
// It also trims the archive by timestamp and set size.
|
||||
func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
|
||||
func (r *RDB) Archive(ctx context.Context, msg *base.TaskMessage, errMsg string) error {
|
||||
var op errors.Op = "rdb.Archive"
|
||||
now := time.Now()
|
||||
modified := *msg
|
||||
@ -585,15 +586,15 @@ func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
|
||||
maxArchiveSize,
|
||||
expireAt.Unix(),
|
||||
}
|
||||
return r.runScript(op, archiveCmd, keys, argv...)
|
||||
return r.runScript(ctx, op, archiveCmd, keys, argv...)
|
||||
}
|
||||
|
||||
// ForwardIfReady checks scheduled and retry sets of the given queues
|
||||
// and move any tasks that are ready to be processed to the pending set.
|
||||
func (r *RDB) ForwardIfReady(qnames ...string) error {
|
||||
func (r *RDB) ForwardIfReady(ctx context.Context, qnames ...string) error {
|
||||
var op errors.Op = "rdb.ForwardIfReady"
|
||||
for _, qname := range qnames {
|
||||
if err := r.forwardAll(qname); err != nil {
|
||||
if err := r.forwardAll(ctx, qname); err != nil {
|
||||
return errors.E(op, errors.CanonicalCode(err), err)
|
||||
}
|
||||
}
|
||||
@ -616,9 +617,9 @@ return table.getn(ids)`)
|
||||
|
||||
// forward moves tasks with a score less than the current unix time
|
||||
// from the src zset to the dst list. It returns the number of tasks moved.
|
||||
func (r *RDB) forward(src, dst, taskKeyPrefix string) (int, error) {
|
||||
func (r *RDB) forward(ctx context.Context, src, dst, taskKeyPrefix string) (int, error) {
|
||||
now := float64(time.Now().Unix())
|
||||
res, err := forwardCmd.Run(r.client, []string{src, dst}, now, taskKeyPrefix).Result()
|
||||
res, err := forwardCmd.Run(ctx, r.client, []string{src, dst}, now, taskKeyPrefix).Result()
|
||||
if err != nil {
|
||||
return 0, errors.E(errors.Internal, fmt.Sprintf("redis eval error: %v", err))
|
||||
}
|
||||
@ -631,14 +632,14 @@ func (r *RDB) forward(src, dst, taskKeyPrefix string) (int, error) {
|
||||
|
||||
// forwardAll checks for tasks in scheduled/retry state that are ready to be run, and updates
|
||||
// their state to "pending".
|
||||
func (r *RDB) forwardAll(qname string) (err error) {
|
||||
func (r *RDB) forwardAll(ctx context.Context, qname string) (err error) {
|
||||
sources := []string{base.ScheduledKey(qname), base.RetryKey(qname)}
|
||||
dst := base.PendingKey(qname)
|
||||
taskKeyPrefix := base.TaskKeyPrefix(qname)
|
||||
for _, src := range sources {
|
||||
n := 1
|
||||
for n != 0 {
|
||||
n, err = r.forward(src, dst, taskKeyPrefix)
|
||||
n, err = r.forward(ctx, src, dst, taskKeyPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -661,11 +662,11 @@ return res
|
||||
`)
|
||||
|
||||
// ListDeadlineExceeded returns a list of task messages that have exceeded the deadline from the given queues.
|
||||
func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
|
||||
func (r *RDB) ListDeadlineExceeded(ctx context.Context, deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
|
||||
var op errors.Op = "rdb.ListDeadlineExceeded"
|
||||
var msgs []*base.TaskMessage
|
||||
for _, qname := range qnames {
|
||||
res, err := listDeadlineExceededCmd.Run(r.client,
|
||||
res, err := listDeadlineExceededCmd.Run(ctx, r.client,
|
||||
[]string{base.DeadlinesKey(qname)},
|
||||
deadline.Unix(), base.TaskKeyPrefix(qname)).Result()
|
||||
if err != nil {
|
||||
@ -703,7 +704,7 @@ redis.call("EXPIRE", KEYS[2], ARGV[1])
|
||||
return redis.status_reply("OK")`)
|
||||
|
||||
// WriteServerState writes server state data to redis with expiration set to the value ttl.
|
||||
func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
|
||||
func (r *RDB) WriteServerState(ctx context.Context, info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
|
||||
var op errors.Op = "rdb.WriteServerState"
|
||||
bytes, err := base.EncodeServerInfo(info)
|
||||
if err != nil {
|
||||
@ -720,13 +721,13 @@ func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo
|
||||
}
|
||||
skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
|
||||
wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
|
||||
if err := r.client.ZAdd(base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
|
||||
if err := r.client.ZAdd(ctx, base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
|
||||
}
|
||||
if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
|
||||
if err := r.client.ZAdd(ctx, base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err})
|
||||
}
|
||||
return r.runScript(op, writeServerStateCmd, []string{skey, wkey}, args...)
|
||||
return r.runScript(ctx, op, writeServerStateCmd, []string{skey, wkey}, args...)
|
||||
}
|
||||
|
||||
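WriteServerState above also shows the plain command side of the upgrade: in v8 every command method (SAdd, ZAdd, ZRem, Del, and so on) takes a context as its first argument, while the rest of the signature stays the same. A small self-contained example of the new ZAdd call, with placeholder key and member values:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	exp := time.Now().Add(30 * time.Second)
	// v8: ctx comes first; members are still passed as *redis.Z values.
	err := rdb.ZAdd(ctx, "example:servers", &redis.Z{
		Score:  float64(exp.Unix()),
		Member: "localhost:1234:server-id",
	}).Err()
	if err != nil {
		fmt.Println("zadd failed:", err)
	}
}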
// KEYS[1] -> asynq:servers:{<host:pid:sid>}
|
||||
@ -737,17 +738,17 @@ redis.call("DEL", KEYS[2])
|
||||
return redis.status_reply("OK")`)
|
||||
|
||||
// ClearServerState deletes server state data from redis.
|
||||
func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
|
||||
func (r *RDB) ClearServerState(ctx context.Context, host string, pid int, serverID string) error {
|
||||
var op errors.Op = "rdb.ClearServerState"
|
||||
skey := base.ServerInfoKey(host, pid, serverID)
|
||||
wkey := base.WorkersKey(host, pid, serverID)
|
||||
if err := r.client.ZRem(base.AllServers, skey).Err(); err != nil {
|
||||
if err := r.client.ZRem(ctx, base.AllServers, skey).Err(); err != nil {
|
||||
return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err})
|
||||
}
|
||||
if err := r.client.ZRem(base.AllWorkers, wkey).Err(); err != nil {
|
||||
if err := r.client.ZRem(ctx, base.AllWorkers, wkey).Err(); err != nil {
|
||||
return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err})
|
||||
}
|
||||
return r.runScript(op, clearServerStateCmd, []string{skey, wkey})
|
||||
return r.runScript(ctx, op, clearServerStateCmd, []string{skey, wkey})
|
||||
}
|
||||
|
||||
// KEYS[1] -> asynq:schedulers:{<schedulerID>}
|
||||
@ -762,7 +763,7 @@ redis.call("EXPIRE", KEYS[1], ARGV[1])
|
||||
return redis.status_reply("OK")`)
|
||||
|
||||
// WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl.
|
||||
func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
|
||||
func (r *RDB) WriteSchedulerEntries(ctx context.Context, schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
|
||||
var op errors.Op = "rdb.WriteSchedulerEntries"
|
||||
args := []interface{}{ttl.Seconds()}
|
||||
for _, e := range entries {
|
||||
@ -774,31 +775,31 @@ func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.Schedule
|
||||
}
|
||||
exp := time.Now().Add(ttl).UTC()
|
||||
key := base.SchedulerEntriesKey(schedulerID)
|
||||
err := r.client.ZAdd(base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
|
||||
err := r.client.ZAdd(ctx, base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
|
||||
if err != nil {
|
||||
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err})
|
||||
}
|
||||
return r.runScript(op, writeSchedulerEntriesCmd, []string{key}, args...)
|
||||
return r.runScript(ctx, op, writeSchedulerEntriesCmd, []string{key}, args...)
|
||||
}
|
||||
|
||||
// ClearSchedulerEntries deletes scheduler entries data from redis.
|
||||
func (r *RDB) ClearSchedulerEntries(schedulerID string) error {
|
||||
func (r *RDB) ClearSchedulerEntries(ctx context.Context, schedulerID string) error {
|
||||
var op errors.Op = "rdb.ClearSchedulerEntries"
|
||||
key := base.SchedulerEntriesKey(schedulerID)
|
||||
if err := r.client.ZRem(base.AllSchedulers, key).Err(); err != nil {
|
||||
if err := r.client.ZRem(ctx, base.AllSchedulers, key).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zrem", Err: err})
|
||||
}
|
||||
if err := r.client.Del(key).Err(); err != nil {
|
||||
if err := r.client.Del(ctx, key).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CancelationPubSub returns a pubsub for cancelation messages.
|
||||
func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
|
||||
func (r *RDB) CancelationPubSub(ctx context.Context) (*redis.PubSub, error) {
|
||||
var op errors.Op = "rdb.CancelationPubSub"
|
||||
pubsub := r.client.Subscribe(base.CancelChannel)
|
||||
_, err := pubsub.Receive()
|
||||
pubsub := r.client.Subscribe(ctx, base.CancelChannel)
|
||||
_, err := pubsub.Receive(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub receive error: %v", err))
|
||||
}
|
||||
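CancelationPubSub covers the pub/sub part of the migration: Subscribe and Receive now require a context, while Channel and the delivered messages are unchanged. A stand-alone sketch of that v8 flow, with a placeholder channel name in place of base.CancelChannel:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	pubsub := rdb.Subscribe(ctx, "example:cancel") // v8: ctx required
	if _, err := pubsub.Receive(ctx); err != nil { // wait for the subscription to be confirmed
		fmt.Println("subscribe failed:", err)
		return
	}
	defer pubsub.Close()

	go rdb.Publish(ctx, "example:cancel", "task-id-123") // v8: ctx required here as well

	msg := <-pubsub.Channel()
	fmt.Println("cancel requested for", msg.Payload)
}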
@ -807,9 +808,9 @@ func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
|
||||
|
||||
// PublishCancelation publishes a cancelation message to all subscribers.
|
||||
// The message is the ID for the task to be canceled.
|
||||
func (r *RDB) PublishCancelation(id string) error {
|
||||
func (r *RDB) PublishCancelation(ctx context.Context, id string) error {
|
||||
var op errors.Op = "rdb.PublishCancelation"
|
||||
if err := r.client.Publish(base.CancelChannel, id).Err(); err != nil {
|
||||
if err := r.client.Publish(ctx, base.CancelChannel, id).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub publish error: %v", err))
|
||||
}
|
||||
return nil
|
||||
@ -828,7 +829,7 @@ return redis.status_reply("OK")`)
|
||||
const maxEvents = 1000
|
||||
|
||||
// RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
|
||||
func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
|
||||
func (r *RDB) RecordSchedulerEnqueueEvent(ctx context.Context, entryID string, event *base.SchedulerEnqueueEvent) error {
|
||||
var op errors.Op = "rdb.RecordSchedulerEnqueueEvent"
|
||||
data, err := base.EncodeSchedulerEnqueueEvent(event)
|
||||
if err != nil {
|
||||
@ -842,14 +843,14 @@ func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerE
|
||||
data,
|
||||
maxEvents,
|
||||
}
|
||||
return r.runScript(op, recordSchedulerEnqueueEventCmd, keys, argv...)
|
||||
return r.runScript(ctx, op, recordSchedulerEnqueueEventCmd, keys, argv...)
|
||||
}
|
||||
|
||||
// ClearSchedulerHistory deletes the enqueue event history for the given scheduler entry.
|
||||
func (r *RDB) ClearSchedulerHistory(entryID string) error {
|
||||
func (r *RDB) ClearSchedulerHistory(ctx context.Context, entryID string) error {
|
||||
var op errors.Op = "rdb.ClearSchedulerHistory"
|
||||
key := base.SchedulerHistoryKey(entryID)
|
||||
if err := r.client.Del(key).Err(); err != nil {
|
||||
if err := r.client.Del(ctx, key).Err(); err != nil {
|
||||
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err})
|
||||
}
|
||||
return nil
|
||||
|
@ -5,6 +5,8 @@
package rdb

import (
"context"
"crypto/tls"
"encoding/json"
"flag"
"strconv"
@ -13,7 +15,7 @@ import (
"testing"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/uuid"
@ -24,18 +26,24 @@ import (

// variables used for package testing.
var (
redisAddr string
redisDB int
redisAddr string
redisDB int
redisPassword string

useRedisCluster bool
redisClusterAddrs string // comma-separated list of host:port
redisTLSServer string

ctx = context.Background()
)

func init() {
flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing")
flag.IntVar(&redisDB, "redis_db", 15, "redis db number to use in testing")
flag.StringVar(&redisPassword, "redis_password", "", "redis password to use in testing")
flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing")
flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses")
flag.StringVar(&redisTLSServer, "redis_tls_server", "", "redis host for TLS verification")
}

func setup(tb testing.TB) (r *RDB) {
|
||||
@ -47,11 +55,15 @@ func setup(tb testing.TB) (r *RDB) {
|
||||
}
|
||||
r = NewRDB(redis.NewClusterClient(&redis.ClusterOptions{
|
||||
Addrs: addrs,
|
||||
Password: redisPassword,
|
||||
TLSConfig: getTLSConfig(),
|
||||
}))
|
||||
} else {
|
||||
r = NewRDB(redis.NewClient(&redis.Options{
|
||||
Addr: redisAddr,
|
||||
DB: redisDB,
|
||||
Password: redisPassword,
|
||||
TLSConfig: getTLSConfig(),
|
||||
}))
|
||||
}
|
||||
// Start each test with a clean slate.
|
||||
@ -59,6 +71,13 @@ func setup(tb testing.TB) (r *RDB) {
|
||||
return r
|
||||
}
|
||||
|
||||
func getTLSConfig() *tls.Config {
|
||||
if redisTLSServer != "" {
|
||||
return &tls.Config{ServerName: redisTLSServer}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
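getTLSConfig turns the new redis_tls_server flag into a *tls.Config that only sets ServerName, and setup feeds it, together with redis_password, into the client options. A hedged, self-contained sketch of the same wiring outside the test harness, using placeholder flag defaults and a Ping to verify the connection (Ping also takes ctx in v8):

package main

import (
	"context"
	"crypto/tls"
	"flag"
	"fmt"

	"github.com/go-redis/redis/v8"
)

var (
	addr      = flag.String("redis_addr", "localhost:6379", "redis address")
	password  = flag.String("redis_password", "", "redis password")
	tlsServer = flag.String("redis_tls_server", "", "redis host for TLS verification")
)

func main() {
	flag.Parse()

	var tlsConfig *tls.Config
	if *tlsServer != "" {
		tlsConfig = &tls.Config{ServerName: *tlsServer}
	}

	rdb := redis.NewClient(&redis.Options{
		Addr:      *addr,
		Password:  *password,
		TLSConfig: tlsConfig,
	})

	ctx := context.Background()
	if err := rdb.Ping(ctx).Err(); err != nil {
		fmt.Println("ping failed:", err)
		return
	}
	fmt.Println("connected")
}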
func TestEnqueue(t *testing.T) {
|
||||
r := setup(t)
|
||||
defer r.Close()
|
||||
@ -77,7 +96,7 @@ func TestEnqueue(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case.
|
||||
|
||||
err := r.Enqueue(tc.msg)
|
||||
err := r.Enqueue(ctx, tc.msg)
|
||||
if err != nil {
|
||||
t.Errorf("(*RDB).Enqueue(msg) = %v, want nil", err)
|
||||
continue
|
||||
@ -85,7 +104,7 @@ func TestEnqueue(t *testing.T) {
|
||||
|
||||
// Check Pending list has task ID.
|
||||
pendingKey := base.PendingKey(tc.msg.Queue)
|
||||
pendingIDs := r.client.LRange(pendingKey, 0, -1).Val()
|
||||
pendingIDs := r.client.LRange(ctx, pendingKey, 0, -1).Val()
|
||||
if n := len(pendingIDs); n != 1 {
|
||||
t.Errorf("Redis LIST %q contains %d IDs, want 1", pendingKey, n)
|
||||
continue
|
||||
@ -97,26 +116,26 @@ func TestEnqueue(t *testing.T) {
|
||||
|
||||
// Check the value under the task key.
|
||||
taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String())
|
||||
encoded := r.client.HGet(taskKey, "msg").Val() // "msg" field
|
||||
encoded := r.client.HGet(ctx, taskKey, "msg").Val() // "msg" field
|
||||
decoded := h.MustUnmarshal(t, encoded)
|
||||
if diff := cmp.Diff(tc.msg, decoded); diff != "" {
|
||||
t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s", decoded, tc.msg, diff)
|
||||
}
|
||||
state := r.client.HGet(taskKey, "state").Val() // "state" field
|
||||
state := r.client.HGet(ctx, taskKey, "state").Val() // "state" field
|
||||
if state != "pending" {
|
||||
t.Errorf("state field under task-key is set to %q, want %q", state, "pending")
|
||||
}
|
||||
timeout := r.client.HGet(taskKey, "timeout").Val() // "timeout" field
|
||||
timeout := r.client.HGet(ctx, taskKey, "timeout").Val() // "timeout" field
|
||||
if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want {
|
||||
t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want)
|
||||
}
|
||||
deadline := r.client.HGet(taskKey, "deadline").Val() // "deadline" field
|
||||
deadline := r.client.HGet(ctx, taskKey, "deadline").Val() // "deadline" field
|
||||
if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
|
||||
t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want)
|
||||
}
|
||||
|
||||
// Check queue is in the AllQueues set.
|
||||
if !r.client.SIsMember(base.AllQueues, tc.msg.Queue).Val() {
|
||||
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() {
|
||||
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
|
||||
}
|
||||
}
|
||||
@ -144,7 +163,7 @@ func TestEnqueueUnique(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case.
|
||||
|
||||
// Enqueue the first message, should succeed.
|
||||
err := r.EnqueueUnique(tc.msg, tc.ttl)
|
||||
err := r.EnqueueUnique(ctx, tc.msg, tc.ttl)
|
||||
if err != nil {
|
||||
t.Errorf("First message: (*RDB).EnqueueUnique(%v, %v) = %v, want nil",
|
||||
tc.msg, tc.ttl, err)
|
||||
@ -158,13 +177,13 @@ func TestEnqueueUnique(t *testing.T) {
|
||||
if diff := cmp.Diff(tc.msg, gotPending[0]); diff != "" {
|
||||
t.Errorf("persisted data differed from the original input (-want, +got)\n%s", diff)
|
||||
}
|
||||
if !r.client.SIsMember(base.AllQueues, tc.msg.Queue).Val() {
|
||||
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() {
|
||||
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
|
||||
}
|
||||
|
||||
// Check Pending list has task ID.
|
||||
pendingKey := base.PendingKey(tc.msg.Queue)
|
||||
pendingIDs := r.client.LRange(pendingKey, 0, -1).Val()
|
||||
pendingIDs := r.client.LRange(ctx, pendingKey, 0, -1).Val()
|
||||
if len(pendingIDs) != 1 {
|
||||
t.Errorf("Redis LIST %q contains %d IDs, want 1", pendingKey, len(pendingIDs))
|
||||
continue
|
||||
@ -176,40 +195,40 @@ func TestEnqueueUnique(t *testing.T) {
|
||||
|
||||
// Check the value under the task key.
|
||||
taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String())
|
||||
encoded := r.client.HGet(taskKey, "msg").Val() // "msg" field
|
||||
encoded := r.client.HGet(ctx, taskKey, "msg").Val() // "msg" field
|
||||
decoded := h.MustUnmarshal(t, encoded)
|
||||
if diff := cmp.Diff(tc.msg, decoded); diff != "" {
|
||||
t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s", decoded, tc.msg, diff)
|
||||
}
|
||||
state := r.client.HGet(taskKey, "state").Val() // "state" field
|
||||
state := r.client.HGet(ctx, taskKey, "state").Val() // "state" field
|
||||
if state != "pending" {
|
||||
t.Errorf("state field under task-key is set to %q, want %q", state, "pending")
|
||||
}
|
||||
timeout := r.client.HGet(taskKey, "timeout").Val() // "timeout" field
|
||||
timeout := r.client.HGet(ctx, taskKey, "timeout").Val() // "timeout" field
|
||||
if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want {
|
||||
t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want)
|
||||
}
|
||||
deadline := r.client.HGet(taskKey, "deadline").Val() // "deadline" field
|
||||
deadline := r.client.HGet(ctx, taskKey, "deadline").Val() // "deadline" field
|
||||
if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
|
||||
t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want)
|
||||
}
|
||||
uniqueKey := r.client.HGet(taskKey, "unique_key").Val() // "unique_key" field
|
||||
uniqueKey := r.client.HGet(ctx, taskKey, "unique_key").Val() // "unique_key" field
|
||||
if uniqueKey != tc.msg.UniqueKey {
|
||||
t.Errorf("uniqueue_key field under task key is set to %q, want %q", uniqueKey, tc.msg.UniqueKey)
|
||||
}
|
||||
|
||||
// Check queue is in the AllQueues set.
|
||||
if !r.client.SIsMember(base.AllQueues, tc.msg.Queue).Val() {
|
||||
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() {
|
||||
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
|
||||
}
|
||||
|
||||
// Enqueue the second message, should fail.
|
||||
got := r.EnqueueUnique(tc.msg, tc.ttl)
|
||||
got := r.EnqueueUnique(ctx, tc.msg, tc.ttl)
|
||||
if !errors.Is(got, errors.ErrDuplicateTask) {
|
||||
t.Errorf("Second message: (*RDB).EnqueueUnique(msg, ttl) = %v, want %v", got, errors.ErrDuplicateTask)
|
||||
continue
|
||||
}
|
||||
gotTTL := r.client.TTL(tc.msg.UniqueKey).Val()
|
||||
gotTTL := r.client.TTL(ctx, tc.msg.UniqueKey).Val()
|
||||
if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 2)) {
|
||||
t.Errorf("TTL %q = %v, want %v", tc.msg.UniqueKey, gotTTL, tc.ttl)
|
||||
continue
|
||||
@ -330,7 +349,7 @@ func TestDequeue(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||
|
||||
gotMsg, gotDeadline, err := r.Dequeue(tc.args...)
|
||||
gotMsg, gotDeadline, err := r.Dequeue(ctx, tc.args...)
|
||||
if err != nil {
|
||||
t.Errorf("(*RDB).Dequeue(%v) returned error %v", tc.args, err)
|
||||
continue
|
||||
@ -425,7 +444,7 @@ func TestDequeueError(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||
|
||||
gotMsg, gotDeadline, gotErr := r.Dequeue(tc.args...)
|
||||
gotMsg, gotDeadline, gotErr := r.Dequeue(ctx, tc.args...)
|
||||
if !errors.Is(gotErr, tc.wantErr) {
|
||||
t.Errorf("(*RDB).Dequeue(%v) returned error %v; want %v",
|
||||
tc.args, gotErr, tc.wantErr)
|
||||
@ -546,13 +565,13 @@ func TestDequeueIgnoresPausedQueues(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
for _, qname := range tc.paused {
|
||||
if err := r.Pause(qname); err != nil {
|
||||
if err := r.Pause(ctx, qname); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||
|
||||
got, _, err := r.Dequeue(tc.args...)
|
||||
got, _, err := r.Dequeue(ctx, tc.args...)
|
||||
if !cmp.Equal(got, tc.wantMsg) || !errors.Is(err, tc.wantErr) {
|
||||
t.Errorf("Dequeue(%v) = %v, %v; want %v, %v",
|
||||
tc.args, got, err, tc.wantMsg, tc.wantErr)
|
||||
@ -681,7 +700,7 @@ func TestDone(t *testing.T) {
|
||||
for _, msg := range msgs {
|
||||
// Set uniqueness lock if unique key is present.
|
||||
if len(msg.UniqueKey) > 0 {
|
||||
err := r.client.SetNX(msg.UniqueKey, msg.ID.String(), time.Minute).Err()
|
||||
err := r.client.SetNX(ctx, msg.UniqueKey, msg.ID.String(), time.Minute).Err()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -689,7 +708,7 @@ func TestDone(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
err := r.Done(tc.target)
|
||||
err := r.Done(ctx, tc.target)
|
||||
if err != nil {
|
||||
t.Errorf("%s; (*RDB).Done(task) = %v, want nil", tc.desc, err)
|
||||
continue
|
||||
@ -711,17 +730,17 @@ func TestDone(t *testing.T) {
|
||||
}
|
||||
|
||||
processedKey := base.ProcessedKey(tc.target.Queue, time.Now())
|
||||
gotProcessed := r.client.Get(processedKey).Val()
|
||||
gotProcessed := r.client.Get(ctx, processedKey).Val()
|
||||
if gotProcessed != "1" {
|
||||
t.Errorf("%s; GET %q = %q, want 1", tc.desc, processedKey, gotProcessed)
|
||||
}
|
||||
|
||||
gotTTL := r.client.TTL(processedKey).Val()
|
||||
gotTTL := r.client.TTL(ctx, processedKey).Val()
|
||||
if gotTTL > statsTTL {
|
||||
t.Errorf("%s; TTL %q = %v, want less than or equal to %v", tc.desc, processedKey, gotTTL, statsTTL)
|
||||
}
|
||||
|
||||
if len(tc.target.UniqueKey) > 0 && r.client.Exists(tc.target.UniqueKey).Val() != 0 {
|
||||
if len(tc.target.UniqueKey) > 0 && r.client.Exists(ctx, tc.target.UniqueKey).Val() != 0 {
|
||||
t.Errorf("%s; Uniqueness lock %q still exists", tc.desc, tc.target.UniqueKey)
|
||||
}
|
||||
}
|
||||
@ -849,7 +868,7 @@ func TestRequeue(t *testing.T) {
|
||||
h.SeedAllActiveQueues(t, r.client, tc.active)
|
||||
h.SeedAllDeadlines(t, r.client, tc.deadlines)
|
||||
|
||||
err := r.Requeue(tc.target)
|
||||
err := r.Requeue(ctx, tc.target)
|
||||
if err != nil {
|
||||
t.Errorf("(*RDB).Requeue(task) = %v, want nil", err)
|
||||
continue
|
||||
@ -890,7 +909,7 @@ func TestSchedule(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
|
||||
err := r.Schedule(tc.msg, tc.processAt)
|
||||
err := r.Schedule(ctx, tc.msg, tc.processAt)
|
||||
if err != nil {
|
||||
t.Errorf("(*RDB).Schedule(%v, %v) = %v, want nil",
|
||||
tc.msg, tc.processAt, err)
|
||||
@ -899,7 +918,7 @@ func TestSchedule(t *testing.T) {
|
||||
|
||||
// Check Scheduled zset has task ID.
|
||||
scheduledKey := base.ScheduledKey(tc.msg.Queue)
|
||||
zs := r.client.ZRangeWithScores(scheduledKey, 0, -1).Val()
|
||||
zs := r.client.ZRangeWithScores(ctx, scheduledKey, 0, -1).Val()
|
||||
if n := len(zs); n != 1 {
|
||||
t.Errorf("Redis ZSET %q contains %d elements, want 1",
|
||||
scheduledKey, n)
|
||||
@ -918,28 +937,28 @@ func TestSchedule(t *testing.T) {
|
||||
|
||||
// Check the values under the task key.
|
||||
taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String())
|
||||
encoded := r.client.HGet(taskKey, "msg").Val() // "msg" field
|
||||
encoded := r.client.HGet(ctx, taskKey, "msg").Val() // "msg" field
|
||||
decoded := h.MustUnmarshal(t, encoded)
|
||||
if diff := cmp.Diff(tc.msg, decoded); diff != "" {
|
||||
t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s",
|
||||
decoded, tc.msg, diff)
|
||||
}
|
||||
state := r.client.HGet(taskKey, "state").Val() // "state" field
|
||||
state := r.client.HGet(ctx, taskKey, "state").Val() // "state" field
|
||||
if want := "scheduled"; state != want {
|
||||
t.Errorf("state field under task-key is set to %q, want %q",
|
||||
state, want)
|
||||
}
|
||||
timeout := r.client.HGet(taskKey, "timeout").Val() // "timeout" field
|
||||
timeout := r.client.HGet(ctx, taskKey, "timeout").Val() // "timeout" field
|
||||
if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want {
|
||||
t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want)
|
||||
}
|
||||
deadline := r.client.HGet(taskKey, "deadline").Val() // "deadline" field
|
||||
deadline := r.client.HGet(ctx, taskKey, "deadline").Val() // "deadline" field
|
||||
if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
|
||||
t.Errorf("deadline field under task-ke is set to %v, want %v", deadline, want)
|
||||
}
|
||||
|
||||
// Check queue is in the AllQueues set.
|
||||
if !r.client.SIsMember(base.AllQueues, tc.msg.Queue).Val() {
|
||||
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() {
|
||||
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
|
||||
}
|
||||
}
|
||||
@ -968,7 +987,7 @@ func TestScheduleUnique(t *testing.T) {
|
||||
h.FlushDB(t, r.client) // clean up db before each test case
|
||||
|
||||
desc := "(*RDB).ScheduleUnique(msg, processAt, ttl)"
|
||||
err := r.ScheduleUnique(tc.msg, tc.processAt, tc.ttl)
|
||||
err := r.ScheduleUnique(ctx, tc.msg, tc.processAt, tc.ttl)
|
||||
if err != nil {
|
||||
t.Errorf("Frist task: %s = %v, want nil", desc, err)
|
||||
continue
|
||||
@ -976,7 +995,7 @@ func TestScheduleUnique(t *testing.T) {
|
||||
|
||||
// Check Scheduled zset has task ID.
|
||||
scheduledKey := base.ScheduledKey(tc.msg.Queue)
|
||||
zs := r.client.ZRangeWithScores(scheduledKey, 0, -1).Val()
|
||||
zs := r.client.ZRangeWithScores(ctx, scheduledKey, 0, -1).Val()
|
||||
if n := len(zs); n != 1 {
|
||||
t.Errorf("Redis ZSET %q contains %d elements, want 1",
|
||||
scheduledKey, n)
|
||||
@ -995,43 +1014,43 @@ func TestScheduleUnique(t *testing.T) {
|
||||
|
||||
// Check the values under the task key.
|
||||
taskKey := base.TaskKey(tc.msg.Queue, tc.msg.ID.String())
|
||||
encoded := r.client.HGet(taskKey, "msg").Val() // "msg" field
|
||||
encoded := r.client.HGet(ctx, taskKey, "msg").Val() // "msg" field
|
||||
decoded := h.MustUnmarshal(t, encoded)
|
||||
if diff := cmp.Diff(tc.msg, decoded); diff != "" {
|
||||
t.Errorf("persisted message was %v, want %v; (-want, +got)\n%s",
|
||||
decoded, tc.msg, diff)
|
||||
}
|
||||
state := r.client.HGet(taskKey, "state").Val() // "state" field
|
||||
state := r.client.HGet(ctx, taskKey, "state").Val() // "state" field
|
||||
if want := "scheduled"; state != want {
|
||||
t.Errorf("state field under task-key is set to %q, want %q",
|
||||
state, want)
|
||||
}
|
||||
timeout := r.client.HGet(taskKey, "timeout").Val() // "timeout" field
|
||||
timeout := r.client.HGet(ctx, taskKey, "timeout").Val() // "timeout" field
|
||||
if want := strconv.Itoa(int(tc.msg.Timeout)); timeout != want {
|
||||
t.Errorf("timeout field under task-key is set to %v, want %v", timeout, want)
|
||||
}
|
||||
deadline := r.client.HGet(taskKey, "deadline").Val() // "deadline" field
|
||||
deadline := r.client.HGet(ctx, taskKey, "deadline").Val() // "deadline" field
|
||||
if want := strconv.Itoa(int(tc.msg.Deadline)); deadline != want {
|
||||
t.Errorf("deadline field under task-key is set to %v, want %v", deadline, want)
|
||||
}
|
||||
uniqueKey := r.client.HGet(taskKey, "unique_key").Val() // "unique_key" field
|
||||
uniqueKey := r.client.HGet(ctx, taskKey, "unique_key").Val() // "unique_key" field
|
||||
if uniqueKey != tc.msg.UniqueKey {
|
||||
t.Errorf("uniqueue_key field under task key is set to %q, want %q", uniqueKey, tc.msg.UniqueKey)
|
||||
}
|
||||
|
||||
// Check queue is in the AllQueues set.
|
||||
if !r.client.SIsMember(base.AllQueues, tc.msg.Queue).Val() {
|
||||
if !r.client.SIsMember(ctx, base.AllQueues, tc.msg.Queue).Val() {
|
||||
t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
|
||||
}
|
||||
|
||||
// Enqueue the second message, should fail.
|
||||
got := r.ScheduleUnique(tc.msg, tc.processAt, tc.ttl)
|
||||
got := r.ScheduleUnique(ctx, tc.msg, tc.processAt, tc.ttl)
|
||||
if !errors.Is(got, errors.ErrDuplicateTask) {
|
||||
t.Errorf("Second task: %s = %v, want %v", desc, got, errors.ErrDuplicateTask)
|
||||
continue
|
||||
}
|
||||
|
||||
gotTTL := r.client.TTL(tc.msg.UniqueKey).Val()
|
||||
gotTTL := r.client.TTL(ctx, tc.msg.UniqueKey).Val()
|
||||
if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
||||
t.Errorf("TTL %q = %v, want %v", tc.msg.UniqueKey, gotTTL, tc.ttl)
|
||||
continue
|
||||
@ -1158,7 +1177,7 @@ func TestRetry(t *testing.T) {
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
|
||||
callTime := time.Now() // time when method was called
|
||||
err := r.Retry(tc.msg, tc.processAt, tc.errMsg)
|
||||
err := r.Retry(ctx, tc.msg, tc.processAt, tc.errMsg)
|
||||
if err != nil {
|
||||
t.Errorf("(*RDB).Retry = %v, want nil", err)
|
||||
continue
|
||||
@ -1189,21 +1208,21 @@ func TestRetry(t *testing.T) {
|
||||
}
|
||||
|
||||
processedKey := base.ProcessedKey(tc.msg.Queue, time.Now())
|
||||
gotProcessed := r.client.Get(processedKey).Val()
|
||||
gotProcessed := r.client.Get(ctx, processedKey).Val()
|
||||
if gotProcessed != "1" {
|
||||
t.Errorf("GET %q = %q, want 1", processedKey, gotProcessed)
|
||||
}
|
||||
gotTTL := r.client.TTL(processedKey).Val()
|
||||
gotTTL := r.client.TTL(ctx, processedKey).Val()
|
||||
if gotTTL > statsTTL {
|
||||
t.Errorf("TTL %q = %v, want less than or equal to %v", processedKey, gotTTL, statsTTL)
|
||||
}
|
||||
|
||||
failedKey := base.FailedKey(tc.msg.Queue, time.Now())
|
||||
gotFailed := r.client.Get(failedKey).Val()
|
||||
gotFailed := r.client.Get(ctx, failedKey).Val()
|
||||
if gotFailed != "1" {
|
||||
t.Errorf("GET %q = %q, want 1", failedKey, gotFailed)
|
||||
}
|
||||
gotTTL = r.client.TTL(failedKey).Val()
|
||||
gotTTL = r.client.TTL(ctx, failedKey).Val()
|
||||
if gotTTL > statsTTL {
|
||||
t.Errorf("TTL %q = %v, want less than or equal to %v", failedKey, gotTTL, statsTTL)
|
||||
}
|
||||
@ -1373,7 +1392,7 @@ func TestArchive(t *testing.T) {
|
||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||
|
||||
callTime := time.Now() // record time `Archive` was called
|
||||
err := r.Archive(tc.target, errMsg)
|
||||
err := r.Archive(ctx, tc.target, errMsg)
|
||||
if err != nil {
|
||||
t.Errorf("(*RDB).Archive(%v, %v) = %v, want nil", tc.target, errMsg, err)
|
||||
continue
|
||||
@ -1399,21 +1418,21 @@ func TestArchive(t *testing.T) {
|
||||
}
|
||||
|
||||
processedKey := base.ProcessedKey(tc.target.Queue, time.Now())
|
||||
gotProcessed := r.client.Get(processedKey).Val()
|
||||
gotProcessed := r.client.Get(ctx, processedKey).Val()
|
||||
if gotProcessed != "1" {
|
||||
t.Errorf("GET %q = %q, want 1", processedKey, gotProcessed)
|
||||
}
|
||||
gotTTL := r.client.TTL(processedKey).Val()
|
||||
gotTTL := r.client.TTL(ctx, processedKey).Val()
|
||||
if gotTTL > statsTTL {
|
||||
t.Errorf("TTL %q = %v, want less than or equal to %v", processedKey, gotTTL, statsTTL)
|
||||
}
|
||||
|
||||
failedKey := base.FailedKey(tc.target.Queue, time.Now())
|
||||
gotFailed := r.client.Get(failedKey).Val()
|
||||
gotFailed := r.client.Get(ctx, failedKey).Val()
|
||||
if gotFailed != "1" {
|
||||
t.Errorf("GET %q = %q, want 1", failedKey, gotFailed)
|
||||
}
|
||||
gotTTL = r.client.TTL(processedKey).Val()
|
||||
gotTTL = r.client.TTL(ctx, processedKey).Val()
|
||||
if gotTTL > statsTTL {
|
||||
t.Errorf("TTL %q = %v, want less than or equal to %v", failedKey, gotTTL, statsTTL)
|
||||
}
|
||||
@ -1537,7 +1556,7 @@ func TestForwardIfReady(t *testing.T) {
|
||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||
|
||||
err := r.ForwardIfReady(tc.qnames...)
|
||||
err := r.ForwardIfReady(ctx, tc.qnames...)
|
||||
if err != nil {
|
||||
t.Errorf("(*RDB).CheckScheduled(%v) = %v, want nil", tc.qnames, err)
|
||||
continue
|
||||
@ -1639,7 +1658,7 @@ func TestListDeadlineExceeded(t *testing.T) {
|
||||
h.FlushDB(t, r.client)
|
||||
h.SeedAllDeadlines(t, r.client, tc.deadlines)
|
||||
|
||||
got, err := r.ListDeadlineExceeded(tc.t, tc.qnames...)
|
||||
got, err := r.ListDeadlineExceeded(ctx, tc.t, tc.qnames...)
|
||||
if err != nil {
|
||||
t.Errorf("%s; ListDeadlineExceeded(%v) returned error: %v", tc.desc, tc.t, err)
|
||||
continue
|
||||
@ -1676,14 +1695,14 @@ func TestWriteServerState(t *testing.T) {
|
||||
ActiveWorkerCount: 0,
|
||||
}
|
||||
|
||||
err := r.WriteServerState(&info, nil /* workers */, ttl)
|
||||
err := r.WriteServerState(ctx, &info, nil /* workers */, ttl)
|
||||
if err != nil {
|
||||
t.Errorf("r.WriteServerState returned an error: %v", err)
|
||||
}
|
||||
|
||||
// Check ServerInfo was written correctly.
|
||||
skey := base.ServerInfoKey(host, pid, serverID)
|
||||
data := r.client.Get(skey).Val()
|
||||
data := r.client.Get(ctx, skey).Val()
|
||||
got, err := base.DecodeServerInfo([]byte(data))
|
||||
if err != nil {
|
||||
t.Fatalf("could not decode server info: %v", err)
|
||||
@ -1693,12 +1712,12 @@ func TestWriteServerState(t *testing.T) {
|
||||
got, info, diff)
|
||||
}
|
||||
// Check ServerInfo TTL was set correctly.
|
||||
gotTTL := r.client.TTL(skey).Val()
|
||||
gotTTL := r.client.TTL(ctx, skey).Val()
|
||||
if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
||||
t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl)
|
||||
}
|
||||
// Check ServerInfo key was added to the set all server keys correctly.
|
||||
gotServerKeys := r.client.ZRange(base.AllServers, 0, -1).Val()
|
||||
gotServerKeys := r.client.ZRange(ctx, base.AllServers, 0, -1).Val()
|
||||
wantServerKeys := []string{skey}
|
||||
if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" {
|
||||
t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys)
|
||||
@ -1706,12 +1725,12 @@ func TestWriteServerState(t *testing.T) {
|
||||
|
||||
// Check WorkersInfo was written correctly.
|
||||
wkey := base.WorkersKey(host, pid, serverID)
|
||||
workerExist := r.client.Exists(wkey).Val()
|
||||
workerExist := r.client.Exists(ctx, wkey).Val()
|
||||
if workerExist != 0 {
|
||||
t.Errorf("%q key exists", wkey)
|
||||
}
|
||||
// Check WorkersInfo key was added to the set correctly.
|
||||
gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
|
||||
gotWorkerKeys := r.client.ZRange(ctx, base.AllWorkers, 0, -1).Val()
|
||||
wantWorkerKeys := []string{wkey}
|
||||
if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
|
||||
t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys)
|
||||
@ -1766,14 +1785,14 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
|
||||
ActiveWorkerCount: len(workers),
|
||||
}
|
||||
|
||||
err := r.WriteServerState(&serverInfo, workers, ttl)
|
||||
err := r.WriteServerState(ctx, &serverInfo, workers, ttl)
|
||||
if err != nil {
|
||||
t.Fatalf("r.WriteServerState returned an error: %v", err)
|
||||
}
|
||||
|
||||
// Check ServerInfo was written correctly.
|
||||
skey := base.ServerInfoKey(host, pid, serverID)
|
||||
data := r.client.Get(skey).Val()
|
||||
data := r.client.Get(ctx, skey).Val()
|
||||
got, err := base.DecodeServerInfo([]byte(data))
|
||||
if err != nil {
|
||||
t.Fatalf("could not decode server info: %v", err)
|
||||
@ -1783,12 +1802,12 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
|
||||
got, serverInfo, diff)
|
||||
}
|
||||
// Check ServerInfo TTL was set correctly.
|
||||
gotTTL := r.client.TTL(skey).Val()
|
||||
gotTTL := r.client.TTL(ctx, skey).Val()
|
||||
if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
||||
t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl)
|
||||
}
|
||||
// Check ServerInfo key was added to the set correctly.
|
||||
gotServerKeys := r.client.ZRange(base.AllServers, 0, -1).Val()
|
||||
gotServerKeys := r.client.ZRange(ctx, base.AllServers, 0, -1).Val()
|
||||
wantServerKeys := []string{skey}
|
||||
if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" {
|
||||
t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys)
|
||||
@ -1796,7 +1815,7 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
|
||||
|
||||
// Check WorkersInfo was written correctly.
|
||||
wkey := base.WorkersKey(host, pid, serverID)
|
||||
wdata := r.client.HGetAll(wkey).Val()
|
||||
wdata := r.client.HGetAll(ctx, wkey).Val()
|
||||
if len(wdata) != 2 {
|
||||
t.Fatalf("HGETALL %q returned a hash of size %d, want 2", wkey, len(wdata))
|
||||
}
|
||||
@ -1814,12 +1833,12 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
|
||||
}
|
||||
|
||||
// Check WorkersInfo TTL was set correctly.
|
||||
gotTTL = r.client.TTL(wkey).Val()
|
||||
gotTTL = r.client.TTL(ctx, wkey).Val()
|
||||
if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
|
||||
t.Errorf("TTL of %q was %v, want %v", wkey, gotTTL, ttl)
|
||||
}
|
||||
// Check WorkersInfo key was added to the set correctly.
|
||||
gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
|
||||
gotWorkerKeys := r.client.ZRange(ctx, base.AllWorkers, 0, -1).Val()
|
||||
wantWorkerKeys := []string{wkey}
|
||||
if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
|
||||
t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys)
|
||||
@ -1892,14 +1911,14 @@ func TestClearServerState(t *testing.T) {
|
||||
}
|
||||
|
||||
// Write server and workers data.
|
||||
if err := r.WriteServerState(&serverInfo1, workers1, ttl); err != nil {
|
||||
if err := r.WriteServerState(ctx, &serverInfo1, workers1, ttl); err != nil {
|
||||
t.Fatalf("could not write server state: %v", err)
|
||||
}
|
||||
if err := r.WriteServerState(&serverInfo2, workers2, ttl); err != nil {
|
||||
if err := r.WriteServerState(ctx, &serverInfo2, workers2, ttl); err != nil {
|
||||
t.Fatalf("could not write server state: %v", err)
|
||||
}
|
||||
|
||||
err := r.ClearServerState(host, pid, serverID)
|
||||
err := r.ClearServerState(ctx, host, pid, serverID)
|
||||
if err != nil {
|
||||
t.Fatalf("(*RDB).ClearServerState failed: %v", err)
|
||||
}
|
||||
@ -1909,18 +1928,18 @@ func TestClearServerState(t *testing.T) {
|
||||
otherSKey := base.ServerInfoKey(otherHost, otherPID, otherServerID)
|
||||
otherWKey := base.WorkersKey(otherHost, otherPID, otherServerID)
|
||||
// Check all keys are cleared.
|
||||
if r.client.Exists(skey).Val() != 0 {
|
||||
if r.client.Exists(ctx, skey).Val() != 0 {
|
||||
t.Errorf("Redis key %q exists", skey)
|
||||
}
|
||||
if r.client.Exists(wkey).Val() != 0 {
|
||||
if r.client.Exists(ctx, wkey).Val() != 0 {
|
||||
t.Errorf("Redis key %q exists", wkey)
|
||||
}
|
||||
gotServerKeys := r.client.ZRange(base.AllServers, 0, -1).Val()
|
||||
gotServerKeys := r.client.ZRange(ctx, base.AllServers, 0, -1).Val()
|
||||
wantServerKeys := []string{otherSKey}
|
||||
if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" {
|
||||
t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys)
|
||||
}
|
||||
gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
|
||||
gotWorkerKeys := r.client.ZRange(ctx, base.AllWorkers, 0, -1).Val()
|
||||
wantWorkerKeys := []string{otherWKey}
|
||||
if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
|
||||
t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys)
|
||||
@ -1931,7 +1950,7 @@ func TestCancelationPubSub(t *testing.T) {
|
||||
r := setup(t)
|
||||
defer r.Close()
|
||||
|
||||
pubsub, err := r.CancelationPubSub()
|
||||
pubsub, err := r.CancelationPubSub(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("(*RDB).CancelationPubSub() returned an error: %v", err)
|
||||
}
|
||||
@ -1954,7 +1973,7 @@ func TestCancelationPubSub(t *testing.T) {
|
||||
publish := []string{"one", "two", "three"}
|
||||
|
||||
for _, msg := range publish {
|
||||
r.PublishCancelation(msg)
|
||||
r.PublishCancelation(ctx, msg)
|
||||
}
|
||||
|
||||
// allow time for the messages to reach the subscribers.
|
||||
|
@ -6,11 +6,12 @@
package testbroker

import (
"context"
"errors"
"sync"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
)

@ -45,148 +46,148 @@ func (tb *TestBroker) Wakeup() {
|
||||
tb.sleeping = false
|
||||
}
|
||||
|
||||
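Every TestBroker method below shadows a method of asynq's internal base.Broker interface, all of which gain a leading context parameter in this commit. The interface definition itself is not part of this diff; the excerpt below is a reconstruction from the calls in this file and may differ from the real declaration in detail:

// Hypothetical excerpt of base.Broker after the change (as seen from inside
// package base); only a handful of methods are listed.
type Broker interface {
	Enqueue(ctx context.Context, msg *TaskMessage) error
	EnqueueUnique(ctx context.Context, msg *TaskMessage, ttl time.Duration) error
	Dequeue(ctx context.Context, qnames ...string) (*TaskMessage, time.Time, error)
	Done(ctx context.Context, msg *TaskMessage) error
	Requeue(ctx context.Context, msg *TaskMessage) error
	Ping(ctx context.Context) error
	Close() error
}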
func (tb *TestBroker) Enqueue(msg *base.TaskMessage) error {
|
||||
func (tb *TestBroker) Enqueue(ctx context.Context, msg *base.TaskMessage) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.Enqueue(msg)
|
||||
return tb.real.Enqueue(ctx, msg)
|
||||
}
|
||||
|
||||
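Each wrapper method follows the same three steps: lock, return errRedisDown if the simulated outage flag is set, otherwise delegate to the real broker with the caller's context. A generic, self-contained illustration of that decorator shape (the store types and method set here are stand-ins, not asynq's):

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

var errRedisDown = errors.New("testbroker: redis is down")

// realStore stands in for the wrapped implementation.
type realStore struct{}

func (realStore) Enqueue(ctx context.Context, msg string) error {
	fmt.Println("enqueued:", msg)
	return nil
}

// testStore simulates Redis being unreachable, mirroring TestBroker's pattern.
type testStore struct {
	mu       sync.Mutex
	sleeping bool
	real     realStore
}

func (ts *testStore) Sleep()  { ts.mu.Lock(); ts.sleeping = true; ts.mu.Unlock() }
func (ts *testStore) Wakeup() { ts.mu.Lock(); ts.sleeping = false; ts.mu.Unlock() }

func (ts *testStore) Enqueue(ctx context.Context, msg string) error {
	ts.mu.Lock()
	defer ts.mu.Unlock()
	if ts.sleeping {
		return errRedisDown
	}
	return ts.real.Enqueue(ctx, msg)
}

func main() {
	ts := &testStore{}
	fmt.Println(ts.Enqueue(context.Background(), "task-1")) // delegates to the real store
	ts.Sleep()
	fmt.Println(ts.Enqueue(context.Background(), "task-2")) // simulated outage
}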
func (tb *TestBroker) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
|
||||
func (tb *TestBroker) EnqueueUnique(ctx context.Context, msg *base.TaskMessage, ttl time.Duration) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.EnqueueUnique(msg, ttl)
|
||||
return tb.real.EnqueueUnique(ctx, msg, ttl)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, error) {
|
||||
func (tb *TestBroker) Dequeue(ctx context.Context, qnames ...string) (*base.TaskMessage, time.Time, error) {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return nil, time.Time{}, errRedisDown
|
||||
}
|
||||
return tb.real.Dequeue(qnames...)
|
||||
return tb.real.Dequeue(ctx, qnames...)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) Done(msg *base.TaskMessage) error {
|
||||
func (tb *TestBroker) Done(ctx context.Context, msg *base.TaskMessage) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.Done(msg)
|
||||
return tb.real.Done(ctx, msg)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) Requeue(msg *base.TaskMessage) error {
|
||||
func (tb *TestBroker) Requeue(ctx context.Context, msg *base.TaskMessage) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.Requeue(msg)
|
||||
return tb.real.Requeue(ctx, msg)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) Schedule(msg *base.TaskMessage, processAt time.Time) error {
|
||||
func (tb *TestBroker) Schedule(ctx context.Context, msg *base.TaskMessage, processAt time.Time) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.Schedule(msg, processAt)
|
||||
return tb.real.Schedule(ctx, msg, processAt)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
|
||||
func (tb *TestBroker) ScheduleUnique(ctx context.Context, msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.ScheduleUnique(msg, processAt, ttl)
|
||||
return tb.real.ScheduleUnique(ctx, msg, processAt, ttl)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
|
||||
func (tb *TestBroker) Retry(ctx context.Context, msg *base.TaskMessage, processAt time.Time, errMsg string) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.Retry(msg, processAt, errMsg)
|
||||
return tb.real.Retry(ctx, msg, processAt, errMsg)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error {
|
||||
func (tb *TestBroker) Archive(ctx context.Context, msg *base.TaskMessage, errMsg string) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.Archive(msg, errMsg)
|
||||
return tb.real.Archive(ctx, msg, errMsg)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
|
||||
func (tb *TestBroker) ForwardIfReady(ctx context.Context, qnames ...string) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.ForwardIfReady(qnames...)
|
||||
return tb.real.ForwardIfReady(ctx, qnames...)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
|
||||
func (tb *TestBroker) ListDeadlineExceeded(ctx context.Context, deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return nil, errRedisDown
|
||||
}
|
||||
return tb.real.ListDeadlineExceeded(deadline, qnames...)
|
||||
return tb.real.ListDeadlineExceeded(ctx, deadline, qnames...)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
|
||||
func (tb *TestBroker) WriteServerState(ctx context.Context, info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.WriteServerState(info, workers, ttl)
|
||||
return tb.real.WriteServerState(ctx, info, workers, ttl)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) ClearServerState(host string, pid int, serverID string) error {
|
||||
func (tb *TestBroker) ClearServerState(ctx context.Context, host string, pid int, serverID string) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.ClearServerState(host, pid, serverID)
|
||||
return tb.real.ClearServerState(ctx, host, pid, serverID)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) CancelationPubSub() (*redis.PubSub, error) {
|
||||
func (tb *TestBroker) CancelationPubSub(ctx context.Context) (*redis.PubSub, error) {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return nil, errRedisDown
|
||||
}
|
||||
return tb.real.CancelationPubSub()
|
||||
return tb.real.CancelationPubSub(ctx)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) PublishCancelation(id string) error {
|
||||
func (tb *TestBroker) PublishCancelation(ctx context.Context, id string) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.PublishCancelation(id)
|
||||
return tb.real.PublishCancelation(ctx, id)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) Ping() error {
|
||||
func (tb *TestBroker) Ping(ctx context.Context) error {
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
if tb.sleeping {
|
||||
return errRedisDown
|
||||
}
|
||||
return tb.real.Ping()
|
||||
return tb.real.Ping(ctx)
|
||||
}
|
||||
|
||||
func (tb *TestBroker) Close() error {
|
||||
|
20
processor.go
20
processor.go
@ -160,7 +160,7 @@ func (p *processor) exec() {
|
||||
return
|
||||
case p.sema <- struct{}{}: // acquire token
|
||||
qnames := p.queues()
|
||||
msg, deadline, err := p.broker.Dequeue(qnames...)
|
||||
msg, deadline, err := p.broker.Dequeue(context.Background(), qnames...)
|
||||
switch {
|
||||
case errors.Is(err, errors.ErrNoProcessableTask):
|
||||
p.logger.Debug("All queues are empty")
|
||||
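At the point where the processor polls for work there is no task yet, so Dequeue receives a fresh context.Background(); a per-task context bounded by the returned deadline is only derived afterwards. A stripped-down, self-contained sketch of that shape (the broker interface and message type here are placeholders, not asynq's):

package main

import (
	"context"
	"fmt"
	"time"
)

// broker is a placeholder; only Dequeue matters for this sketch.
type broker interface {
	Dequeue(ctx context.Context, qnames ...string) (msg string, deadline time.Time, err error)
}

type fakeBroker struct{}

func (fakeBroker) Dequeue(ctx context.Context, qnames ...string) (string, time.Time, error) {
	return "task-42", time.Now().Add(30 * time.Minute), nil
}

func main() {
	var b broker = fakeBroker{}

	// Polling is not tied to any particular task, so a background context is used.
	msg, deadline, err := b.Dequeue(context.Background(), "default", "critical")
	if err != nil {
		fmt.Println("dequeue error:", err)
		return
	}

	// The task's own context, bounded by its deadline, is created afterwards.
	ctx, cancel := context.WithDeadline(context.Background(), deadline)
	defer cancel()
	fmt.Println("processing", msg, "until", deadline, "; ctx alive:", ctx.Err() == nil)
}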
@ -211,7 +211,7 @@ func (p *processor) exec() {
|
||||
case <-p.abort:
|
||||
// time is up, push the message back to queue and quit this worker goroutine.
|
||||
p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
|
||||
p.requeue(msg)
|
||||
p.requeue(ctx, msg)
|
||||
return
|
||||
case <-ctx.Done():
|
||||
p.retryOrKill(ctx, msg, ctx.Err())
|
||||
@ -231,8 +231,8 @@ func (p *processor) exec() {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *processor) requeue(msg *base.TaskMessage) {
|
||||
err := p.broker.Requeue(msg)
|
||||
func (p *processor) requeue(ctx context.Context, msg *base.TaskMessage) {
|
||||
err := p.broker.Requeue(ctx, msg)
|
||||
if err != nil {
|
||||
p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err)
|
||||
} else {
|
||||
@ -241,7 +241,7 @@ func (p *processor) requeue(msg *base.TaskMessage) {
|
||||
}
|
||||
|
||||
func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
|
||||
err := p.broker.Done(msg)
|
||||
err := p.broker.Done(ctx, msg)
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
|
||||
deadline, ok := ctx.Deadline()
|
||||
@ -251,7 +251,7 @@ func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
|
||||
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
||||
p.syncRequestCh <- &syncRequest{
|
||||
fn: func() error {
|
||||
return p.broker.Done(msg)
|
||||
return p.broker.Done(ctx, msg)
|
||||
},
|
||||
errMsg: errMsg,
|
||||
deadline: deadline,
|
||||
@ -278,7 +278,7 @@ func (p *processor) retryOrKill(ctx context.Context, msg *base.TaskMessage, err
|
||||
func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
|
||||
d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
|
||||
retryAt := time.Now().Add(d)
|
||||
err := p.broker.Retry(msg, retryAt, e.Error())
|
||||
err := p.broker.Retry(ctx, msg, retryAt, e.Error())
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
|
||||
deadline, ok := ctx.Deadline()
|
||||
@ -288,7 +288,7 @@ func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
|
||||
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
||||
p.syncRequestCh <- &syncRequest{
|
||||
fn: func() error {
|
||||
return p.broker.Retry(msg, retryAt, e.Error())
|
||||
return p.broker.Retry(ctx, msg, retryAt, e.Error())
|
||||
},
|
||||
errMsg: errMsg,
|
||||
deadline: deadline,
|
||||
@ -297,7 +297,7 @@ func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
|
||||
}
|
||||
|
||||
func (p *processor) archive(ctx context.Context, msg *base.TaskMessage, e error) {
|
||||
err := p.broker.Archive(msg, e.Error())
|
||||
err := p.broker.Archive(ctx, msg, e.Error())
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue))
|
||||
deadline, ok := ctx.Deadline()
|
||||
@ -307,7 +307,7 @@ func (p *processor) archive(ctx context.Context, msg *base.TaskMessage, e error)
|
||||
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
||||
p.syncRequestCh <- &syncRequest{
|
||||
fn: func() error {
|
||||
return p.broker.Archive(msg, e.Error())
|
||||
return p.broker.Archive(ctx, msg, e.Error())
|
||||
},
|
||||
errMsg: errMsg,
|
||||
deadline: deadline,
|
||||
|
@ -112,14 +112,14 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
|
||||
|
||||
p.start(&sync.WaitGroup{})
|
||||
for _, msg := range tc.incoming {
|
||||
err := rdbClient.Enqueue(msg)
|
||||
err := rdbClient.Enqueue(ctx, msg)
|
||||
if err != nil {
|
||||
p.shutdown()
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
time.Sleep(2 * time.Second) // wait for two second to allow all pending tasks to be processed.
|
||||
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||
if l := r.LLen(ctx, base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
|
||||
}
|
||||
p.shutdown()
|
||||
@ -211,7 +211,7 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
|
||||
time.Sleep(2 * time.Second)
|
||||
// Make sure no messages are stuck in active list.
|
||||
for _, qname := range tc.queues {
|
||||
if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
|
||||
if l := r.LLen(ctx, base.ActiveKey(qname)).Val(); l != 0 {
|
||||
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
|
||||
}
|
||||
}
|
||||
@ -290,7 +290,7 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
|
||||
|
||||
p.start(&sync.WaitGroup{})
|
||||
time.Sleep(2 * time.Second) // wait for two second to allow all pending tasks to be processed.
|
||||
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||
if l := r.LLen(ctx, base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
|
||||
}
|
||||
p.shutdown()
|
||||
@ -439,7 +439,7 @@ func TestProcessorRetry(t *testing.T) {
|
||||
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff)
|
||||
}
|
||||
|
||||
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||
if l := r.LLen(ctx, base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||
t.Errorf("%s: %q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), tc.desc, l)
|
||||
}
|
||||
|
||||
@ -593,7 +593,7 @@ func TestProcessorWithStrictPriority(t *testing.T) {
|
||||
time.Sleep(tc.wait)
|
||||
// Make sure no tasks are stuck in active list.
|
||||
for _, qname := range tc.queues {
|
||||
if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
|
||||
if l := r.LLen(ctx, base.ActiveKey(qname)).Val(); l != 0 {
|
||||
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
|
||||
}
|
||||
}
|
||||
|
@ -5,6 +5,7 @@
package asynq

import (
"context"
"fmt"
"sync"
"time"
@ -76,7 +77,7 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
|
||||
func (r *recoverer) recover() {
|
||||
// Get all tasks which have expired 30 seconds ago or earlier.
|
||||
deadline := time.Now().Add(-30 * time.Second)
|
||||
msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...)
|
||||
msgs, err := r.broker.ListDeadlineExceeded(context.Background(), deadline, r.queues...)
|
||||
if err != nil {
|
||||
r.logger.Warn("recoverer: could not list deadline exceeded tasks")
|
||||
return
|
||||
@ -94,13 +95,13 @@ func (r *recoverer) recover() {
|
||||
func (r *recoverer) retry(msg *base.TaskMessage, errMsg string) {
|
||||
delay := r.retryDelayFunc(msg.Retried, fmt.Errorf(errMsg), NewTask(msg.Type, msg.Payload))
|
||||
retryAt := time.Now().Add(delay)
|
||||
if err := r.broker.Retry(msg, retryAt, errMsg); err != nil {
|
||||
if err := r.broker.Retry(context.Background(), msg, retryAt, errMsg); err != nil {
|
||||
r.logger.Warnf("recoverer: could not retry deadline exceeded task: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *recoverer) archive(msg *base.TaskMessage, errMsg string) {
|
||||
if err := r.broker.Archive(msg, errMsg); err != nil {
|
||||
if err := r.broker.Archive(context.Background(), msg, errMsg); err != nil {
|
||||
r.logger.Warnf("recoverer: could not move task to archive: %v", err)
|
||||
}
|
||||
}
|
||||
|
19
scheduler.go
19
scheduler.go
@ -5,12 +5,13 @@
package asynq

import (
"context"
"fmt"
"os"
"sync"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
@ -117,7 +118,7 @@ type enqueueJob struct {
|
||||
}
|
||||
|
||||
func (j *enqueueJob) Run() {
|
||||
info, err := j.client.Enqueue(j.task, j.opts...)
|
||||
info, err := j.client.Enqueue(context.Background(), j.task, j.opts...)
|
||||
if err != nil {
|
||||
j.logger.Errorf("scheduler could not enqueue a task %+v: %v", j.task, err)
|
||||
if j.errHandler != nil {
|
||||
@ -130,7 +131,7 @@ func (j *enqueueJob) Run() {
|
||||
TaskID: info.ID,
|
||||
EnqueuedAt: time.Now().In(j.location),
|
||||
}
|
||||
err = j.rdb.RecordSchedulerEnqueueEvent(j.id.String(), event)
|
||||
err = j.rdb.RecordSchedulerEnqueueEvent(context.Background(), j.id.String(), event)
|
||||
if err != nil {
|
||||
j.logger.Errorf("scheduler could not record enqueue event of enqueued task %+v: %v", j.task, err)
|
||||
}
|
||||
@ -138,7 +139,7 @@ func (j *enqueueJob) Run() {
|
||||
|
||||
// Register registers a task to be enqueued on the given schedule specified by the cronspec.
|
||||
// It returns an ID of the newly registered entry.
|
||||
func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entryID string, err error) {
|
||||
func (s *Scheduler) Register(ctx context.Context, cronspec string, task *Task, opts ...Option) (entryID string, err error) {
|
||||
job := &enqueueJob{
|
||||
id: uuid.New(),
|
||||
cronspec: cronspec,
|
||||
@ -206,7 +207,7 @@ func (s *Scheduler) Shutdown() {
|
||||
<-ctx.Done()
|
||||
s.wg.Wait()
|
||||
|
||||
s.clearHistory()
|
||||
s.clearHistory(ctx)
|
||||
s.client.Close()
|
||||
s.rdb.Close()
|
||||
s.state.Set(base.StateClosed)
|
||||
@ -220,7 +221,7 @@ func (s *Scheduler) runHeartbeater() {
|
||||
select {
|
||||
case <-s.done:
|
||||
s.logger.Debugf("Scheduler heatbeater shutting down")
|
||||
s.rdb.ClearSchedulerEntries(s.id)
|
||||
s.rdb.ClearSchedulerEntries(context.Background(), s.id)
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.beat()
|
||||
@ -245,7 +246,7 @@ func (s *Scheduler) beat() {
|
||||
entries = append(entries, e)
|
||||
}
|
||||
s.logger.Debugf("Writing entries %v", entries)
|
||||
if err := s.rdb.WriteSchedulerEntries(s.id, entries, 5*time.Second); err != nil {
|
||||
if err := s.rdb.WriteSchedulerEntries(context.Background(), s.id, entries, 5*time.Second); err != nil {
|
||||
s.logger.Warnf("Scheduler could not write heartbeat data: %v", err)
|
||||
}
|
||||
}
|
||||
@ -258,10 +259,10 @@ func stringifyOptions(opts []Option) []string {
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *Scheduler) clearHistory() {
|
||||
func (s *Scheduler) clearHistory(ctx context.Context) {
|
||||
for _, entry := range s.cron.Entries() {
|
||||
job := entry.Job.(*enqueueJob)
|
||||
if err := s.rdb.ClearSchedulerHistory(job.id.String()); err != nil {
|
||||
if err := s.rdb.ClearSchedulerHistory(ctx, job.id.String()); err != nil {
|
||||
s.logger.Warnf("Could not clear scheduler history for entry %q: %v", job.id.String(), err)
|
||||
}
|
||||
}
|
||||
|
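
Scheduler.Register now takes a context as its first argument, and Shutdown passes a context through to clearHistory. A minimal usage sketch against this revision (inside a main function, with the asynq, context, and log imports; the cronspec and task type are made up for illustration):

    scheduler := asynq.NewScheduler(asynq.RedisClientOpt{Addr: "localhost:6379"}, nil)
    entryID, err := scheduler.Register(context.Background(), "@every 30s", asynq.NewTask("cleanup", nil))
    if err != nil {
        log.Fatalf("could not register entry: %v", err)
    }
    log.Printf("registered entry %s", entryID)
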
@ -59,7 +59,7 @@ func TestSchedulerRegister(t *testing.T) {

for _, tc := range tests {
scheduler := NewScheduler(getRedisConnOpt(t), nil)
if _, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...); err != nil {
if _, err := scheduler.Register(ctx, tc.cronspec, tc.task, tc.opts...); err != nil {
t.Fatal(err)
}

@ -95,7 +95,7 @@ func TestSchedulerWhenRedisDown(t *testing.T) {

task := NewTask("test", nil)

if _, err := scheduler.Register("@every 3s", task); err != nil {
if _, err := scheduler.Register(ctx, "@every 3s", task); err != nil {
t.Fatal(err)
}

@ -134,7 +134,7 @@ func TestSchedulerUnregister(t *testing.T) {

for _, tc := range tests {
scheduler := NewScheduler(getRedisConnOpt(t), nil)
entryID, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...)
entryID, err := scheduler.Register(ctx, tc.cronspec, tc.task, tc.opts...)
if err != nil {
t.Fatal(err)
}
@ -15,7 +15,7 @@ import (
"sync"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb"
@ -19,7 +19,7 @@ import (

func TestServer(t *testing.T) {
// https://github.com/go-redis/redis/issues/1029
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
defer goleak.VerifyNoLeaks(t, ignoreOpt)

redisConnOpt := getRedisConnOpt(t)
@ -40,12 +40,12 @@ func TestServer(t *testing.T) {
t.Fatal(err)
}

_, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 123})))
_, err = c.Enqueue(ctx, NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 123})))
if err != nil {
t.Errorf("could not enqueue a task: %v", err)
}

_, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 456})), ProcessIn(1*time.Hour))
_, err = c.Enqueue(ctx, NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 456})), ProcessIn(1*time.Hour))
if err != nil {
t.Errorf("could not enqueue a task: %v", err)
}
@ -55,7 +55,7 @@ func TestServer(t *testing.T) {

func TestServerRun(t *testing.T) {
// https://github.com/go-redis/redis/issues/1029
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
defer goleak.VerifyNoLeaks(t, ignoreOpt)

srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
@ -183,15 +183,15 @@ func TestServerWithFlakyBroker(t *testing.T) {
}

for i := 0; i < 10; i++ {
_, err := c.Enqueue(NewTask("enqueued", nil), MaxRetry(i))
_, err := c.Enqueue(ctx, NewTask("enqueued", nil), MaxRetry(i))
if err != nil {
t.Fatal(err)
}
_, err = c.Enqueue(NewTask("bad_task", nil))
_, err = c.Enqueue(ctx, NewTask("bad_task", nil))
if err != nil {
t.Fatal(err)
}
_, err = c.Enqueue(NewTask("scheduled", nil), ProcessIn(time.Duration(i)*time.Second))
_, err = c.Enqueue(ctx, NewTask("scheduled", nil), ProcessIn(time.Duration(i)*time.Second))
if err != nil {
t.Fatal(err)
}
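
In this revision Client.Enqueue likewise takes a context as its first argument. A short enqueue sketch with options, assuming a reachable local Redis (inside a main function, with the asynq, context, log, and time imports; task type, payload, and option values are illustrative):

    client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer client.Close()

    info, err := client.Enqueue(context.Background(), asynq.NewTask("send_email", nil),
        asynq.ProcessIn(1*time.Hour), asynq.MaxRetry(3))
    if err != nil {
        log.Fatalf("could not enqueue task: %v", err)
    }
    log.Printf("enqueued task %v", info.ID)
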
@ -5,10 +5,11 @@
package asynq

import (
"context"
"sync"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
@ -59,7 +60,7 @@ func (s *subscriber) start(wg *sync.WaitGroup) {
)
// Try until successfully connect to Redis.
for {
pubsub, err = s.broker.CancelationPubSub()
pubsub, err = s.broker.CancelationPubSub(context.Background())
if err != nil {
s.logger.Errorf("cannot subscribe to cancelation channel: %v", err)
select {
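
CancelationPubSub wraps go-redis pub/sub, and in v8 Subscribe also takes a context. A rough illustration of the underlying calls (the channel name here is a placeholder, not necessarily the one asynq uses internally):

    pubsub := rdb.Subscribe(ctx, "cancelation-channel")
    defer pubsub.Close()
    for msg := range pubsub.Channel() {
        fmt.Println("cancel task id:", msg.Payload)
    }
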
@ -51,7 +51,7 @@ func TestSubscriber(t *testing.T) {
// wait for subscriber to establish connection to pubsub channel
time.Sleep(time.Second)

if err := rdbClient.PublishCancelation(tc.publishID); err != nil {
if err := rdbClient.PublishCancelation(ctx, tc.publishID); err != nil {
t.Fatalf("could not publish cancelation message: %v", err)
}

@ -110,7 +110,7 @@ func TestSubscriberWithRedisDown(t *testing.T) {
called = true
})

if err := r.PublishCancelation(id); err != nil {
if err := r.PublishCancelation(ctx, id); err != nil {
t.Fatalf("could not publish cancelation message: %v", err)
}
@ -41,7 +41,7 @@ func TestSyncer(t *testing.T) {
m := msg
syncRequestCh <- &syncRequest{
fn: func() error {
return rdbClient.Done(m)
return rdbClient.Done(ctx, m)
},
deadline: time.Now().Add(5 * time.Minute),
}
@ -11,7 +11,7 @@ import (
"strings"
"time"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
@ -14,7 +14,7 @@ import (
"unicode"
"unicode/utf8"

"github.com/go-redis/redis/v7"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
10
tools/go.mod
10
tools/go.mod
@ -3,21 +3,13 @@ module github.com/hibiken/asynq/tools
go 1.13

require (
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect
github.com/coreos/go-etcd v2.0.0+incompatible // indirect
github.com/cpuguy83/go-md2man v1.0.10 // indirect
github.com/fatih/color v1.9.0
github.com/go-redis/redis/v7 v7.4.0
github.com/golang/protobuf v1.4.1 // indirect
github.com/go-redis/redis/v8 v8.11.0
github.com/google/uuid v1.2.0
github.com/hibiken/asynq v0.17.1
github.com/mitchellh/go-homedir v1.1.0
github.com/spf13/cast v1.3.1
github.com/spf13/cobra v1.1.1
github.com/spf13/viper v1.7.0
github.com/ugorji/go v1.1.4 // indirect
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 // indirect
)

replace github.com/hibiken/asynq => ./..
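
For the tools module, the same dependency bump can be reproduced with standard Go module commands, run from the tools directory (version taken from the require line above):

    go get github.com/go-redis/redis/v8@v8.11.0
    go mod tidy
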
105
tools/go.sum
105
tools/go.sum
@ -18,7 +18,6 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
@ -26,39 +25,39 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
|
||||
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
||||
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
|
||||
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
||||
github.com/go-redis/redis/v8 v8.11.0 h1:O1Td0mQ8UFChQ3N9zFQqo6kTU2cJ+/it88gDB+zg0wo=
|
||||
github.com/go-redis/redis/v8 v8.11.0/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
@ -69,36 +68,34 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
@ -124,7 +121,6 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
@ -143,7 +139,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
@ -168,12 +163,17 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4=
|
||||
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
|
||||
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
@ -196,7 +196,6 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
@ -213,49 +212,41 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
|
||||
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
|
||||
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
|
||||
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
|
||||
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@ -274,6 +265,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -287,11 +279,12 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -300,6 +293,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -307,7 +301,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -316,14 +309,19 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@ -345,9 +343,13 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
@ -369,7 +371,6 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
@ -379,6 +380,7 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
@ -388,7 +390,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
@ -397,12 +398,12 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
|
||||
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|