Mirror of https://github.com/hibiken/asynq.git (synced 2025-09-17 20:30:06 +08:00)

Compare commits: 27 commits
Commits (SHA1):

- b6486716b4
- 742ed6546f
- 897ab4e28b
- a4e4c0b1d5
- 95b7dcaad4
- 8d3248e850
- e69264dc04
- 93ff4e4149
- 3ee97f327e
- 49c117f4d1
- 5b53a2aee9
- 26e399bc9c
- e21fe3bd79
- 37c6c73d9b
- 5775a5818d
- e3c9aae8c7
- 9e02a91808
- 830020eb39
- f9a6c6156f
- 8540172306
- b44908fefd
- e78d5e4171
- 0cc486a975
- 906f231e6c
- 39f237899b
- 3d9a222bb3
- 2bcaea52ce
.travis.yml

@@ -4,8 +4,11 @@ git:
    depth: 1
env:
- GO111MODULE=on # go modules are the default
go: [1.12.x, 1.13.x]
go: [1.12.x, 1.13.x, 1.14.x]
script:
- go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
services:
- redis-server
after_success:
- bash ./.travis/benchcmp.sh
- bash <(curl -s https://codecov.io/bash)

CHANGELOG.md (17 changed lines)

@@ -7,6 +7,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

## [0.6.0] - 2020-03-01

### Added

- Added `ServeMux` type to make it easy for users to implement the Handler interface.
- `ErrorHandler` type was added. Allows users to specify an error handling function (e.g. to report errors to an error reporting service such as Honeybadger, Bugsnag, etc.)

## [0.5.0] - 2020-02-23

### Changed

- `Client` API has changed. Use `Enqueue`, `EnqueueAt` and `EnqueueIn` to enqueue and schedule tasks.

### Added

- `asynqmon workers` was added to list information about all running workers

## [0.4.0] - 2020-02-13

### Changed

README.md (92 changed lines)

@@ -5,6 +5,7 @@

[Go Report Card](https://goreportcard.com/report/github.com/hibiken/asynq)
[GoDoc](https://godoc.org/github.com/hibiken/asynq)
[Gitter chat](https://gitter.im/go-asynq/community)
[codecov](https://codecov.io/gh/hibiken/asynq)

Asynq is a simple Go library for queueing tasks and processing them in the background with workers.
It is backed by Redis and it is designed to have a low barrier to entry. It should be integrated in your web stack easily.

@@ -29,62 +30,42 @@ First, make sure you are running a Redis server locally.

$ redis-server
```

To create and schedule tasks, use `Client` and provide a task and when to process the task.
To create and schedule tasks, use `Client` and provide a task and when to enqueue the task.

```go
func main() {
    r := &asynq.RedisClientOpt{
        Addr: "localhost:6379",
        Addr: "127.0.0.1:6379",
    }

    client := asynq.NewClient(r)

    // Create a task with task type and payload
    t1 := asynq.NewTask("send_welcome_email", map[string]interface{}{"user_id": 42})
    t1 := asynq.NewTask("email:signup", map[string]interface{}{"user_id": 42})

    t2 := asynq.NewTask("send_reminder_email", map[string]interface{}{"user_id": 42})
    t2 := asynq.NewTask("email:reminder", map[string]interface{}{"user_id": 42})

    // Process immediately
    err := client.Schedule(t1, time.Now())
    err := client.Enqueue(t1)

    // Process 24 hrs later
    err = client.Schedule(t2, time.Now().Add(24 * time.Hour))
    err = client.EnqueueIn(24*time.Hour, t2)

    // If processing fails, retry up to 10 times (Default is 25)
    err = client.Schedule(t1, time.Now(), asynq.Retry(10))
    // Process at specified time.
    target := time.Date(2020, time.March, 6, 10, 0, 0, 0, time.UTC)
    err = client.EnqueueAt(target, t2)

    // Use custom queue called "critical"
    err = client.Schedule(t1, time.Now(), asynq.Queue("critical"))

    // Use timeout to specify how long a task may run (Default is no limit)
    err = client.Schedule(t1, time.Now(), asynq.Timeout(30 * time.Second))
    // Pass options to specify processing behavior for a given task.
    //
    // MaxRetry specifies the maximum number of times this task will be retried (Default is 25).
    // Queue specifies which queue to enqueue this task to (Default is "default").
    // Timeout specifies the timeout for the task's context (Default is no timeout).
    err = client.Enqueue(t1, asynq.MaxRetry(10), asynq.Queue("critical"), asynq.Timeout(time.Minute))
}
```

To start the background workers, use `Background` and provide your `Handler` to process the tasks.

```go
func main() {
    r := &asynq.RedisClientOpt{
        Addr: "localhost:6379",
    }

    bg := asynq.NewBackground(r, &asynq.Config{
        // Specify how many concurrent workers to use
        Concurrency: 10,
        // You can optionally create multiple queues with different priority.
        Queues: map[string]int{
            "critical": 6,
            "default":  3,
            "low":      1,
        },
        // See the godoc for other configuration options
    })

    bg.Run(handler)
}
```

`Handler` is an interface with one method `ProcessTask` with the following signature.

```go
@@ -98,6 +79,46 @@ type Handler interface {
}
```
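For reference while reading this hunk, a minimal type satisfying the Handler interface looks like the sketch below. The ProcessTask signature and the Payload.GetInt accessor are taken from the signupEmailHandler shown further down; everything else is illustrative.

```go
package main

import (
    "context"
    "log"

    "github.com/hibiken/asynq"
)

// emailHandler sketches a type that implements asynq.Handler.
type emailHandler struct{}

func (emailHandler) ProcessTask(ctx context.Context, t *asynq.Task) error {
    id, err := t.Payload.GetInt("user_id")
    if err != nil {
        return err
    }
    log.Printf("processing %s for user %d", t.Type, id)
    return nil
}

func main() {
    t := asynq.NewTask("email:signup", map[string]interface{}{"user_id": 42})
    // Exercise the handler directly; no Redis needed for this check.
    if err := (emailHandler{}).ProcessTask(context.Background(), t); err != nil {
        log.Fatal(err)
    }
}
```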

You can optionally use `ServeMux` to create a handler, just as you would with `"net/http"` Handler.

```go
func main() {
    r := &asynq.RedisClientOpt{
        Addr: "127.0.0.1:6379",
    }

    bg := asynq.NewBackground(r, &asynq.Config{
        // Specify how many concurrent workers to use
        Concurrency: 10,
        // Optionally specify multiple queues with different priority.
        Queues: map[string]int{
            "critical": 6,
            "default":  3,
            "low":      1,
        },
        // See the godoc for other configuration options
    })

    mux := asynq.NewServeMux()
    mux.HandleFunc("email:signup", signupEmailHandler)
    mux.HandleFunc("email:reminder", reminderEmailHandler)
    // ...register other handlers...

    bg.Run(mux)
}

// function with the same signature as the ProcessTask method for the Handler interface.
func signupEmailHandler(ctx context.Context, t *asynq.Task) error {
    id, err := t.Payload.GetInt("user_id")
    if err != nil {
        return err
    }
    fmt.Printf("Send welcome email to user %d\n", id)
    // ...your email sending logic...
    return nil
}
```

For a more detailed walk-through of the library, see our [Getting Started Guide](https://github.com/hibiken/asynq/wiki/Getting-Started).

To learn more about `asynq` features and APIs, see our [Wiki pages](https://github.com/hibiken/asynq/wiki) and [godoc](https://godoc.org/github.com/hibiken/asynq).

@@ -129,6 +150,7 @@ Please see the [Contribution Guide](/CONTRIBUTING.md) before contributing.

## Acknowledgements

- [Sidekiq](https://github.com/mperham/sidekiq) : Many of the design ideas are taken from sidekiq and its Web UI
- [RQ](https://github.com/rq/rq) : Client APIs are inspired by rq library.
- [Cobra](https://github.com/spf13/cobra) : Asynqmon CLI is built with cobra

## License

asynq.go (2 changed lines)

@@ -138,6 +138,6 @@ func createRedisClient(r RedisConnOpt) *redis.Client {
        TLSConfig: r.TLSConfig,
    })
    default:
        panic(fmt.Sprintf("unexpected type %T for RedisConnOpt", r))
        panic(fmt.Sprintf("asynq: unexpected type %T for RedisConnOpt", r))
    }
}

background.go

@@ -34,7 +34,11 @@ type Background struct {
    mu      sync.Mutex
    running bool

    pinfo *base.ProcessInfo
    ps *base.ProcessState

    // wait group to wait for all goroutines to finish.
    wg sync.WaitGroup

    rdb       *rdb.RDB
    scheduler *scheduler
    processor *processor

@@ -85,6 +89,35 @@ type Config struct {
    // The tasks in lower priority queues are processed only when those queues with
    // higher priorities are empty.
    StrictPriority bool

    // ErrorHandler handles errors returned by the task handler.
    //
    // HandleError is invoked only if the task handler returns a non-nil error.
    //
    // Example:
    //     func reportError(task *asynq.Task, err error, retried, maxRetry int) {
    //         if retried >= maxRetry {
    //             err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
    //         }
    //         errorReportingService.Notify(err)
    //     }
    //
    //     ErrorHandler: asynq.ErrorHandlerFunc(reportError)
    ErrorHandler ErrorHandler
}

// An ErrorHandler handles errors returned by the task handler.
type ErrorHandler interface {
    HandleError(task *Task, err error, retried, maxRetry int)
}

// The ErrorHandlerFunc type is an adapter to allow the use of ordinary functions as an ErrorHandler.
// If f is a function with the appropriate signature, ErrorHandlerFunc(f) is an ErrorHandler that calls f.
type ErrorHandlerFunc func(task *Task, err error, retried, maxRetry int)

// HandleError calls fn(task, err, retried, maxRetry)
func (fn ErrorHandlerFunc) HandleError(task *Task, err error, retried, maxRetry int) {
    fn(task, err, retried, maxRetry)
}
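To see the new pieces together, here is a self-contained sketch that wires an ErrorHandler into Config. The logNotifier type is a hypothetical stand-in for an error reporting client; Config.ErrorHandler, ErrorHandlerFunc, NewBackground, and HandlerFunc are the APIs from this changeset.

```go
package main

import (
    "context"
    "errors"
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

// logNotifier stands in for an error reporting client (Honeybadger,
// Bugsnag, etc.); it is not part of asynq.
type logNotifier struct{}

func (logNotifier) Notify(err error) { log.Printf("task error: %v", err) }

var notifier logNotifier

// reportError matches the ErrorHandlerFunc signature added above and is
// invoked only when a task handler returns a non-nil error.
func reportError(task *asynq.Task, err error, retried, maxRetry int) {
    if retried >= maxRetry {
        err = fmt.Errorf("retry exhausted for task %q: %w", task.Type, err)
    }
    notifier.Notify(err)
}

func main() {
    bg := asynq.NewBackground(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"}, &asynq.Config{
        Concurrency:  10,
        ErrorHandler: asynq.ErrorHandlerFunc(reportError),
    })
    bg.Run(asynq.HandlerFunc(func(ctx context.Context, t *asynq.Task) error {
        return errors.New("always fail, to exercise the error handler")
    }))
}
```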

// Formula taken from https://github.com/mperham/sidekiq.
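Sidekiq's documented default is (count^4) + 15 + (rand(30) * (count + 1)) seconds. A small Go sketch of that curve, for intuition only; the exact constants asynq uses live in the function this comment precedes:

```go
package main

import (
    "fmt"
    "math"
    "math/rand"
    "time"
)

// retryDelay sketches the sidekiq-style backoff: roughly n^4 seconds
// plus a constant and a jitter term that grows with the retry count,
// so early retries are quick and later ones back off sharply.
func retryDelay(n int) time.Duration {
    s := int(math.Pow(float64(n), 4)) + 15 + rand.Intn(30)*(n+1)
    return time.Duration(s) * time.Second
}

func main() {
    for _, n := range []int{1, 5, 10, 25} {
        fmt.Printf("retry %2d: ~%v\n", n, retryDelay(n))
    }
}
```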

@@ -125,18 +158,18 @@ func NewBackground(r RedisConnOpt, cfg *Config) *Background {
    }
    pid := os.Getpid()

    pinfo := base.NewProcessInfo(host, pid, n, queues, cfg.StrictPriority)
    rdb := rdb.NewRDB(createRedisClient(r))
    syncRequestCh := make(chan *syncRequest)
    cancelations := base.NewCancelations()
    syncer := newSyncer(syncRequestCh, 5*time.Second)
    heartbeater := newHeartbeater(rdb, pinfo, 5*time.Second)
    ps := base.NewProcessState(host, pid, n, queues, cfg.StrictPriority)
    syncCh := make(chan *syncRequest)
    cancels := base.NewCancelations()
    syncer := newSyncer(syncCh, 5*time.Second)
    heartbeater := newHeartbeater(rdb, ps, 5*time.Second)
    scheduler := newScheduler(rdb, 5*time.Second, queues)
    processor := newProcessor(rdb, pinfo, delayFunc, syncRequestCh, cancelations)
    subscriber := newSubscriber(rdb, cancelations)
    processor := newProcessor(rdb, ps, delayFunc, syncCh, cancels, cfg.ErrorHandler)
    subscriber := newSubscriber(rdb, cancels)
    return &Background{
        pinfo: pinfo,
        rdb: rdb,
        ps: ps,
        scheduler: scheduler,
        processor: processor,
        syncer: syncer,

@@ -145,7 +178,7 @@ func NewBackground(r RedisConnOpt, cfg *Config) *Background {
    }
}

// A Handler processes a task.
// A Handler processes tasks.
//
// ProcessTask should return nil if the processing of a task
// is successful.

@@ -188,7 +221,7 @@ func (bg *Background) Run(handler Handler) {
    sig := <-sigs
    if sig == syscall.SIGTSTP {
        bg.processor.stop()
        bg.pinfo.SetState("stopped")
        bg.ps.SetStatus(base.StatusStopped)
        continue
    }
    break

@@ -208,11 +241,11 @@ func (bg *Background) start(handler Handler) {
    bg.running = true
    bg.processor.handler = handler

    bg.heartbeater.start()
    bg.subscriber.start()
    bg.syncer.start()
    bg.scheduler.start()
    bg.processor.start()
    bg.heartbeater.start(&bg.wg)
    bg.subscriber.start(&bg.wg)
    bg.syncer.start(&bg.wg)
    bg.scheduler.start(&bg.wg)
    bg.processor.start(&bg.wg)
}

// stops the background-task processing.

@@ -223,17 +256,19 @@ func (bg *Background) stop() {
        return
    }

    // Note: The order of termination is important.
    // Sender goroutines should be terminated before the receiver goroutines.
    //
    // processor -> syncer (via syncCh)
    bg.scheduler.terminate()
    bg.processor.terminate()
    // Note: processor and all worker goroutines need to be exited
    // before shutting down syncer to avoid goroutine leak.
    bg.syncer.terminate()
    bg.subscriber.terminate()
    bg.heartbeater.terminate()

    bg.rdb.ClearProcessInfo(bg.pinfo)
    bg.wg.Wait()

    bg.rdb.Close()
    bg.processor.handler = nil
    bg.running = false

    logger.info("Bye!")
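The termination-order comments above encode a general channel rule: every sender must stop before its receiver goes away, and the shared WaitGroup is waited on only after all terminate calls. A minimal, self-contained sketch of that pattern (names chosen to mirror processor and syncer; this is not asynq code):

```go
package main

import (
    "fmt"
    "sync"
)

func main() {
    syncCh := make(chan int)
    var wg sync.WaitGroup

    // Receiver (the syncer's role): drains syncCh until it is closed.
    wg.Add(1)
    go func() {
        defer wg.Done()
        for v := range syncCh {
            fmt.Println("synced", v)
        }
    }()

    // Sender (the processor's role): must finish before the receiver
    // shuts down, or its sends on syncCh would block forever.
    for i := 0; i < 3; i++ {
        syncCh <- i
    }

    close(syncCh) // safe only once every sender has stopped
    wg.Wait()     // mirrors bg.wg.Wait() above
}
```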

background_test.go

@@ -34,9 +34,15 @@ func TestBackground(t *testing.T) {

    bg.start(HandlerFunc(h))

    client.Schedule(NewTask("send_email", map[string]interface{}{"recipient_id": 123}), time.Now())
    err := client.Enqueue(NewTask("send_email", map[string]interface{}{"recipient_id": 123}))
    if err != nil {
        t.Errorf("could not enqueue a task: %v", err)
    }

    client.Schedule(NewTask("send_email", map[string]interface{}{"recipient_id": 456}), time.Now().Add(time.Hour))
    err = client.EnqueueAt(time.Now().Add(time.Hour), NewTask("send_email", map[string]interface{}{"recipient_id": 456}))
    if err != nil {
        t.Errorf("could not enqueue a task: %v", err)
    }

    bg.stop()
}

benchmark_test.go

@@ -33,7 +33,9 @@ func BenchmarkEndToEndSimple(b *testing.B) {
    // Create a bunch of tasks
    for i := 0; i < count; i++ {
        t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
        client.Schedule(t, time.Now())
        if err := client.Enqueue(t); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }

    var wg sync.WaitGroup

@@ -74,11 +76,15 @@ func BenchmarkEndToEnd(b *testing.B) {
    // Create a bunch of tasks
    for i := 0; i < count; i++ {
        t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
        client.Schedule(t, time.Now())
        if err := client.Enqueue(t); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }
    for i := 0; i < count; i++ {
        t := NewTask(fmt.Sprintf("scheduled%d", i), map[string]interface{}{"data": i})
        client.Schedule(t, time.Now().Add(time.Second))
        if err := client.EnqueueAt(time.Now().Add(time.Second), t); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }

    var wg sync.WaitGroup

@@ -129,15 +135,21 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
    // Create a bunch of tasks
    for i := 0; i < highCount; i++ {
        t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
        client.Schedule(t, time.Now(), Queue("high"))
        if err := client.Enqueue(t, Queue("high")); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }
    for i := 0; i < defaultCount; i++ {
        t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
        client.Schedule(t, time.Now())
        if err := client.Enqueue(t); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }
    for i := 0; i < lowCount; i++ {
        t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
        client.Schedule(t, time.Now(), Queue("low"))
        if err := client.Enqueue(t, Queue("low")); err != nil {
            b.Fatalf("could not enqueue a task: %v", err)
        }
    }

    var wg sync.WaitGroup

client.go (39 changed lines)

@@ -96,14 +96,13 @@ const (
    defaultMaxRetry = 25
)

// Schedule registers a task to be processed at the specified time.
// EnqueueAt schedules task to be enqueued at the specified time.
//
// Schedule returns nil if the task is registered successfully,
// otherwise returns a non-nil error.
// EnqueueAt returns nil if the task is scheduled successfully, otherwise returns a non-nil error.
//
// opts specifies the behavior of task processing. If there are conflicting
// Option values the last one overrides others.
func (c *Client) Schedule(task *Task, processAt time.Time, opts ...Option) error {
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
func (c *Client) EnqueueAt(t time.Time, task *Task, opts ...Option) error {
    opt := composeOptions(opts...)
    msg := &base.TaskMessage{
        ID: xid.New(),

@@ -113,12 +112,32 @@ func (c *Client) Schedule(task *Task, processAt time.Time, opts ...Option) error
        Retry:   opt.retry,
        Timeout: opt.timeout.String(),
    }
    return c.enqueue(msg, processAt)
    return c.enqueue(msg, t)
}

func (c *Client) enqueue(msg *base.TaskMessage, processAt time.Time) error {
    if time.Now().After(processAt) {
// Enqueue enqueues task to be processed immediately.
//
// Enqueue returns nil if the task is enqueued successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
func (c *Client) Enqueue(task *Task, opts ...Option) error {
    return c.EnqueueAt(time.Now(), task, opts...)
}

// EnqueueIn schedules task to be enqueued after the specified delay.
//
// EnqueueIn returns nil if the task is scheduled successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
func (c *Client) EnqueueIn(d time.Duration, task *Task, opts ...Option) error {
    return c.EnqueueAt(time.Now().Add(d), task, opts...)
}

func (c *Client) enqueue(msg *base.TaskMessage, t time.Time) error {
    if time.Now().After(t) {
        return c.rdb.Enqueue(msg)
    }
    return c.rdb.Schedule(msg, processAt)
    return c.rdb.Schedule(msg, t)
}
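Net effect of these hunks: Enqueue and EnqueueIn are thin wrappers around EnqueueAt, and the private enqueue decides between an immediate push and the scheduled set. A short usage sketch of the renamed API (local Redis assumed):

```go
package main

import (
    "log"
    "time"

    "github.com/hibiken/asynq"
)

func main() {
    client := asynq.NewClient(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})

    t := asynq.NewTask("email:reminder", map[string]interface{}{"user_id": 42})

    // Immediate: pushed straight onto the queue.
    if err := client.Enqueue(t); err != nil {
        log.Fatal(err)
    }

    // Relative delay: EnqueueIn(d, task) == EnqueueAt(time.Now().Add(d), task).
    if err := client.EnqueueIn(24*time.Hour, t); err != nil {
        log.Fatal(err)
    }

    // Absolute time: parked in the scheduled set until the time passes.
    target := time.Date(2020, time.March, 6, 10, 0, 0, 0, time.UTC)
    if err := client.EnqueueAt(target, t); err != nil {
        log.Fatal(err)
    }
}
```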

client_test.go (365 changed lines)

@@ -13,15 +13,20 @@ import (
    "github.com/hibiken/asynq/internal/base"
)

func TestClient(t *testing.T) {
func TestClientEnqueueAt(t *testing.T) {
    r := setup(t)
    client := NewClient(&RedisClientOpt{
        Addr: "localhost:6379",
        DB: 14,
    client := NewClient(RedisClientOpt{
        Addr: redisAddr,
        DB: redisDB,
    })

    task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})

    var (
        now          = time.Now()
        oneHourLater = now.Add(time.Hour)
    )

    tests := []struct {
        desc string
        task *Task

@@ -33,7 +38,7 @@ func TestClient(t *testing.T) {
        {
            desc: "Process task immediately",
            task: task,
            processAt: time.Now(),
            processAt: now,
            opts: []Option{},
            wantEnqueued: map[string][]*base.TaskMessage{
                "default": []*base.TaskMessage{

@@ -51,7 +56,7 @@ func TestClient(t *testing.T) {
        {
            desc: "Schedule task to be processed in the future",
            task: task,
            processAt: time.Now().Add(2 * time.Hour),
            processAt: oneHourLater,
            opts: []Option{},
            wantEnqueued: nil, // db is flushed in setup so list does not exist hence nil
            wantScheduled: []h.ZSetEntry{

@@ -63,137 +68,239 @@ func TestClient(t *testing.T) {
                    Queue: "default",
                    Timeout: time.Duration(0).String(),
                },
                Score: float64(time.Now().Add(2 * time.Hour).Unix()),
                Score: float64(oneHourLater.Unix()),
            },
        },
    },
    {
        desc: "Process task immediately with a custom retry count",
        task: task,
        processAt: time.Now(),
        opts: []Option{
            MaxRetry(3),
        },
        wantEnqueued: map[string][]*base.TaskMessage{
            "default": []*base.TaskMessage{
                &base.TaskMessage{
                    Type: task.Type,
                    Payload: task.Payload.data,
                    Retry: 3,
                    Queue: "default",
                    Timeout: time.Duration(0).String(),
                },
            },
        },
        wantScheduled: nil, // db is flushed in setup so zset does not exist hence nil
    },
    {
        desc: "Negative retry count",
        task: task,
        processAt: time.Now(),
        opts: []Option{
            MaxRetry(-2),
        },
        wantEnqueued: map[string][]*base.TaskMessage{
            "default": []*base.TaskMessage{
                &base.TaskMessage{
                    Type: task.Type,
                    Payload: task.Payload.data,
                    Retry: 0, // Retry count should be set to zero
                    Queue: "default",
                    Timeout: time.Duration(0).String(),
                },
            },
        },
        wantScheduled: nil, // db is flushed in setup so zset does not exist hence nil
    },
    {
        desc: "Conflicting options",
        task: task,
        processAt: time.Now(),
        opts: []Option{
            MaxRetry(2),
            MaxRetry(10),
        },
        wantEnqueued: map[string][]*base.TaskMessage{
            "default": []*base.TaskMessage{
                &base.TaskMessage{
                    Type: task.Type,
                    Payload: task.Payload.data,
                    Retry: 10, // Last option takes precedence
                    Queue: "default",
                    Timeout: time.Duration(0).String(),
                },
            },
        },
        wantScheduled: nil, // db is flushed in setup so zset does not exist hence nil
    },
    {
        desc: "With queue option",
        task: task,
        processAt: time.Now(),
        opts: []Option{
            Queue("custom"),
        },
        wantEnqueued: map[string][]*base.TaskMessage{
            "custom": []*base.TaskMessage{
                &base.TaskMessage{
                    Type: task.Type,
                    Payload: task.Payload.data,
                    Retry: defaultMaxRetry,
                    Queue: "custom",
                    Timeout: time.Duration(0).String(),
                },
            },
        },
        wantScheduled: nil, // db is flushed in setup so zset does not exist hence nil
    },
    {
        desc: "Queue option should be case-insensitive",
        task: task,
        processAt: time.Now(),
        opts: []Option{
            Queue("HIGH"),
        },
        wantEnqueued: map[string][]*base.TaskMessage{
            "high": []*base.TaskMessage{
                &base.TaskMessage{
                    Type: task.Type,
                    Payload: task.Payload.data,
                    Retry: defaultMaxRetry,
                    Queue: "high",
                    Timeout: time.Duration(0).String(),
                },
            },
        },
        wantScheduled: nil, // db is flushed in setup so zset does not exist hence nil
    },
    {
        desc: "Timeout option sets the timeout duration",
        task: task,
        processAt: time.Now(),
        opts: []Option{
            Timeout(20 * time.Second),
        },
        wantEnqueued: map[string][]*base.TaskMessage{
            "default": []*base.TaskMessage{
                &base.TaskMessage{
                    Type: task.Type,
                    Payload: task.Payload.data,
                    Retry: defaultMaxRetry,
                    Queue: "default",
                    Timeout: (20 * time.Second).String(),
                },
            },
        },
        wantScheduled: nil, // db is flushed in setup so zset does not exist hence nil
    },
}

for _, tc := range tests {
    h.FlushDB(t, r) // clean up db before each test case.

    err := client.Schedule(tc.task, tc.processAt, tc.opts...)
    err := client.EnqueueAt(tc.processAt, tc.task, tc.opts...)
    if err != nil {
        t.Error(err)
        continue
    }

    for qname, want := range tc.wantEnqueued {
        gotEnqueued := h.GetEnqueuedMessages(t, r, qname)
        if diff := cmp.Diff(want, gotEnqueued, h.IgnoreIDOpt); diff != "" {
            t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
        }
    }

    gotScheduled := h.GetScheduledEntries(t, r)
    if diff := cmp.Diff(tc.wantScheduled, gotScheduled, h.IgnoreIDOpt); diff != "" {
        t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.ScheduledQueue, diff)
    }
}
}

func TestClientEnqueue(t *testing.T) {
    r := setup(t)
    client := NewClient(RedisClientOpt{
        Addr: redisAddr,
        DB: redisDB,
    })

    task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})

    tests := []struct {
        desc string
        task *Task
        opts []Option
        wantEnqueued map[string][]*base.TaskMessage
    }{
        {
            desc: "Process task immediately with a custom retry count",
            task: task,
            opts: []Option{
                MaxRetry(3),
            },
            wantEnqueued: map[string][]*base.TaskMessage{
                "default": []*base.TaskMessage{
                    &base.TaskMessage{
                        Type: task.Type,
                        Payload: task.Payload.data,
                        Retry: 3,
                        Queue: "default",
                        Timeout: time.Duration(0).String(),
                    },
                },
            },
        },
        {
            desc: "Negative retry count",
            task: task,
            opts: []Option{
                MaxRetry(-2),
            },
            wantEnqueued: map[string][]*base.TaskMessage{
                "default": []*base.TaskMessage{
                    &base.TaskMessage{
                        Type: task.Type,
                        Payload: task.Payload.data,
                        Retry: 0, // Retry count should be set to zero
                        Queue: "default",
                        Timeout: time.Duration(0).String(),
                    },
                },
            },
        },
        {
            desc: "Conflicting options",
            task: task,
            opts: []Option{
                MaxRetry(2),
                MaxRetry(10),
            },
            wantEnqueued: map[string][]*base.TaskMessage{
                "default": []*base.TaskMessage{
                    &base.TaskMessage{
                        Type: task.Type,
                        Payload: task.Payload.data,
                        Retry: 10, // Last option takes precedence
                        Queue: "default",
                        Timeout: time.Duration(0).String(),
                    },
                },
            },
        },
        {
            desc: "With queue option",
            task: task,
            opts: []Option{
                Queue("custom"),
            },
            wantEnqueued: map[string][]*base.TaskMessage{
                "custom": []*base.TaskMessage{
                    &base.TaskMessage{
                        Type: task.Type,
                        Payload: task.Payload.data,
                        Retry: defaultMaxRetry,
                        Queue: "custom",
                        Timeout: time.Duration(0).String(),
                    },
                },
            },
        },
        {
            desc: "Queue option should be case-insensitive",
            task: task,
            opts: []Option{
                Queue("HIGH"),
            },
            wantEnqueued: map[string][]*base.TaskMessage{
                "high": []*base.TaskMessage{
                    &base.TaskMessage{
                        Type: task.Type,
                        Payload: task.Payload.data,
                        Retry: defaultMaxRetry,
                        Queue: "high",
                        Timeout: time.Duration(0).String(),
                    },
                },
            },
        },
        {
            desc: "Timeout option sets the timeout duration",
            task: task,
            opts: []Option{
                Timeout(20 * time.Second),
            },
            wantEnqueued: map[string][]*base.TaskMessage{
                "default": []*base.TaskMessage{
                    &base.TaskMessage{
                        Type: task.Type,
                        Payload: task.Payload.data,
                        Retry: defaultMaxRetry,
                        Queue: "default",
                        Timeout: (20 * time.Second).String(),
                    },
                },
            },
        },
    }

    for _, tc := range tests {
        h.FlushDB(t, r) // clean up db before each test case.

        err := client.Enqueue(tc.task, tc.opts...)
        if err != nil {
            t.Error(err)
            continue
        }

        for qname, want := range tc.wantEnqueued {
            got := h.GetEnqueuedMessages(t, r, qname)
            if diff := cmp.Diff(want, got, h.IgnoreIDOpt); diff != "" {
                t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
            }
        }
    }
}

func TestClientEnqueueIn(t *testing.T) {
    r := setup(t)
    client := NewClient(RedisClientOpt{
        Addr: redisAddr,
        DB: redisDB,
    })

    task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})

    tests := []struct {
        desc string
        task *Task
        delay time.Duration
        opts []Option
        wantEnqueued map[string][]*base.TaskMessage
        wantScheduled []h.ZSetEntry
    }{
        {
            desc: "schedule a task to be enqueued in one hour",
            task: task,
            delay: time.Hour,
            opts: []Option{},
            wantEnqueued: nil, // db is flushed in setup so list does not exist hence nil
            wantScheduled: []h.ZSetEntry{
                {
                    Msg: &base.TaskMessage{
                        Type: task.Type,
                        Payload: task.Payload.data,
                        Retry: defaultMaxRetry,
                        Queue: "default",
                        Timeout: time.Duration(0).String(),
                    },
                    Score: float64(time.Now().Add(time.Hour).Unix()),
                },
            },
        },
        {
            desc: "Zero delay",
            task: task,
            delay: 0,
            opts: []Option{},
            wantEnqueued: map[string][]*base.TaskMessage{
                "default": []*base.TaskMessage{
                    &base.TaskMessage{
                        Type: task.Type,
                        Payload: task.Payload.data,
                        Retry: defaultMaxRetry,
                        Queue: "default",
                        Timeout: time.Duration(0).String(),
                    },
                },
            },
            wantScheduled: nil, // db is flushed in setup so zset does not exist hence nil
        },
    }

    for _, tc := range tests {
        h.FlushDB(t, r) // clean up db before each test case.

        err := client.EnqueueIn(tc.delay, tc.task, tc.opts...)
        if err != nil {
            t.Error(err)
            continue

doc.go (11 changed lines)

@@ -9,8 +9,8 @@ Asynq uses Redis as a message broker. To connect to redis server,
specify the options using one of RedisConnOpt types.

    redis = &asynq.RedisClientOpt{
        Addr: "localhost:6379",
        Password: "secretpassword",
        Addr: "127.0.0.1:6379",
        Password: "xxxxx",
        DB: 3,
    }

@@ -24,8 +24,11 @@ Task is created with two parameters: its type and payload.
        "send_email",
        map[string]interface{}{"user_id": 42})

    // Schedule the task t to be processed a minute from now.
    err := client.Schedule(t, time.Now().Add(time.Minute))
    // Enqueue the task to be processed immediately.
    err := client.Enqueue(t)

    // Schedule the task to be processed in one minute.
    err = client.EnqueueIn(time.Minute, t)

The Background is used to run the background task processing with a given
handler.

go.mod (10 changed lines)

@@ -3,20 +3,12 @@ module github.com/hibiken/asynq

go 1.13

require (
    github.com/go-redis/redis/v7 v7.0.0-beta.4
    github.com/go-redis/redis/v7 v7.2.0
    github.com/google/go-cmp v0.4.0
    github.com/mitchellh/go-homedir v1.1.0
    github.com/pelletier/go-toml v1.6.0 // indirect
    github.com/rs/xid v1.2.1
    github.com/spf13/afero v1.2.2 // indirect
    github.com/spf13/cast v1.3.1
    github.com/spf13/cobra v0.0.5
    github.com/spf13/jwalterweatherman v1.1.0 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/spf13/viper v1.6.0
    go.uber.org/goleak v0.10.0
    golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e // indirect
    golang.org/x/text v0.3.2 // indirect
    golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
    gopkg.in/yaml.v2 v2.2.7 // indirect
)

go.sum (158 changed lines)

@@ -1,178 +1,53 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis/v7 v7.0.0-beta.4 h1:p6z7Pde69EGRWvlC++y8aFcaWegyrKHzOBGo0zUACTQ=
github.com/go-redis/redis/v7 v7.0.0-beta.4/go.mod h1:xhhSbUMTsleRPur+Vgx9sUHtyN33bdjxY+9/0n9Ig8s=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4=
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.6.0 h1:qSjVKzM2dmqQLutPN4Y0SEzDpAf7T6HHIT3E2Xr75Gg=
github.com/spf13/viper v1.6.0/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=

@@ -181,34 +56,19 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

heartbeat.go (18 changed lines)

@@ -5,6 +5,7 @@
package asynq

import (
    "sync"
    "time"

    "github.com/hibiken/asynq/internal/base"

@@ -16,7 +17,7 @@ import (
type heartbeater struct {
    rdb *rdb.RDB

    pinfo *base.ProcessInfo
    ps *base.ProcessState

    // channel to communicate back to the long running "heartbeater" goroutine.
    done chan struct{}

@@ -25,10 +26,10 @@
    interval time.Duration
}

func newHeartbeater(rdb *rdb.RDB, pinfo *base.ProcessInfo, interval time.Duration) *heartbeater {
func newHeartbeater(rdb *rdb.RDB, ps *base.ProcessState, interval time.Duration) *heartbeater {
    return &heartbeater{
        rdb: rdb,
        pinfo: pinfo,
        ps: ps,
        done: make(chan struct{}),
        interval: interval,
    }

@@ -40,14 +41,17 @@ func (h *heartbeater) terminate() {
    h.done <- struct{}{}
}

func (h *heartbeater) start() {
    h.pinfo.SetStarted(time.Now())
    h.pinfo.SetState("running")
func (h *heartbeater) start(wg *sync.WaitGroup) {
    h.ps.SetStarted(time.Now())
    h.ps.SetStatus(base.StatusRunning)
    wg.Add(1)
    go func() {
        defer wg.Done()
        h.beat()
        for {
            select {
            case <-h.done:
                h.rdb.ClearProcessState(h.ps)
                logger.info("Heartbeater done")
                return
            case <-time.After(h.interval):

@@ -60,7 +64,7 @@
func (h *heartbeater) beat() {
    // Note: Set TTL to be long enough so that it won't expire before we write again
    // and short enough to expire quickly once the process is shut down or killed.
    err := h.rdb.WriteProcessInfo(h.pinfo, h.interval*2)
    err := h.rdb.WriteProcessState(h.ps, h.interval*2)
    if err != nil {
        logger.error("could not write heartbeat data: %v", err)
    }
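The TTL passed to WriteProcessState bounds how stale the process listing can get: with the 5-second interval used by NewBackground, an entry for a crashed process expires within roughly two intervals. A small illustration of that arithmetic (interval value assumed from NewBackground above):

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    interval := 5 * time.Second // heartbeat interval used in NewBackground
    ttl := 2 * interval         // TTL passed to WriteProcessState

    // Worst case: the process dies right after a successful write, so its
    // entry stays visible for one full TTL before Redis expires it.
    fmt.Printf("a dead process stays listed for at most %v\n", ttl)
}
```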

heartbeat_test.go

@@ -5,6 +5,7 @@
package asynq

import (
    "sync"
    "testing"
    "time"

@@ -26,7 +27,7 @@ func TestHeartbeater(t *testing.T) {
    queues map[string]int
    concurrency int
}{
    {time.Second, "some.address.ec2.aws.com", 45678, map[string]int{"default": 1}, 10},
    {time.Second, "localhost", 45678, map[string]int{"default": 1}, 10},
}

timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond)

@@ -34,8 +35,11 @@
for _, tc := range tests {
    h.FlushDB(t, r)

    pi := base.NewProcessInfo(tc.host, tc.pid, tc.concurrency, tc.queues, false)
    hb := newHeartbeater(rdbClient, pi, tc.interval)
    state := base.NewProcessState(tc.host, tc.pid, tc.concurrency, tc.queues, false)
    hb := newHeartbeater(rdbClient, state, tc.interval)

    var wg sync.WaitGroup
    hb.start(&wg)

    want := &base.ProcessInfo{
        Host: tc.host,

@@ -43,42 +47,53 @@
        Queues: tc.queues,
        Concurrency: tc.concurrency,
        Started: time.Now(),
        State: "running",
        Status: "running",
    }
    hb.start()

    // allow for heartbeater to write to redis
    time.Sleep(tc.interval * 2)

    got, err := rdbClient.ReadProcessInfo(tc.host, tc.pid)
    ps, err := rdbClient.ListProcesses()
    if err != nil {
        t.Errorf("could not read process status from redis: %v", err)
        hb.terminate()
        continue
    }

    if diff := cmp.Diff(want, got, timeCmpOpt, ignoreOpt); diff != "" {
        t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", got, want, diff)
    if len(ps) != 1 {
        t.Errorf("(*RDB).ListProcesses returned %d process info, want 1", len(ps))
        hb.terminate()
        continue
    }

    // state change
    pi.SetState("stopped")
    if diff := cmp.Diff(want, ps[0], timeCmpOpt, ignoreOpt); diff != "" {
        t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ps[0], want, diff)
        hb.terminate()
        continue
    }

    // status change
    state.SetStatus(base.StatusStopped)

    // allow for heartbeater to write to redis
    time.Sleep(tc.interval * 2)

    want.State = "stopped"
    got, err = rdbClient.ReadProcessInfo(tc.host, tc.pid)
    want.Status = "stopped"
    ps, err = rdbClient.ListProcesses()
    if err != nil {
        t.Errorf("could not read process status from redis: %v", err)
        hb.terminate()
        continue
    }

    if diff := cmp.Diff(want, got, timeCmpOpt, ignoreOpt); diff != "" {
        t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", got, want, diff)
    if len(ps) != 1 {
        t.Errorf("(*RDB).ListProcesses returned %d process info, want 1", len(ps))
        hb.terminate()
        continue
    }

    if diff := cmp.Diff(want, ps[0], timeCmpOpt, ignoreOpt); diff != "" {
        t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ps[0], want, diff)
        hb.terminate()
        continue
    }

internal/asynqtest/asynqtest.go
@@ -53,6 +53,22 @@ var SortProcessInfoOpt = cmp.Transformer("SortProcessInfo", func(in []*base.Proc
    return out
})

// SortWorkerInfoOpt is a cmp.Option to sort base.WorkerInfo for comparing slice of worker info.
var SortWorkerInfoOpt = cmp.Transformer("SortWorkerInfo", func(in []*base.WorkerInfo) []*base.WorkerInfo {
    out := append([]*base.WorkerInfo(nil), in...) // Copy input to avoid mutating it
    sort.Slice(out, func(i, j int) bool {
        return out[i].ID.String() < out[j].ID.String()
    })
    return out
})

// SortStringSliceOpt is a cmp.Option to sort string slice.
var SortStringSliceOpt = cmp.Transformer("SortStringSlice", func(in []string) []string {
    out := append([]string(nil), in...)
    sort.Strings(out)
    return out
})

// IgnoreIDOpt is a cmp.Option to ignore ID field in task messages when comparing.
var IgnoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID")
@@ -20,8 +20,10 @@ const DefaultQueueName = "default"

// Redis keys
const (
    psPrefix = "asynq:ps:" // HASH
    AllProcesses = "asynq:ps" // ZSET
    psPrefix = "asynq:ps:" // STRING - asynq:ps:<host>:<pid>
    AllWorkers = "asynq:workers" // ZSET
    workersPrefix = "asynq:workers:" // HASH - asynq:workers:<host:<pid>
    processedPrefix = "asynq:processed:" // STRING - asynq:processed:<yyyy-mm-dd>
    failurePrefix = "asynq:failure:" // STRING - asynq:failure:<yyyy-mm-dd>
    QueuePrefix = "asynq:queues:" // LIST - asynq:queues:<qname>
@@ -34,28 +36,31 @@ const (
    CancelChannel = "asynq:cancel" // PubSub channel
)

// QueueKey returns a redis key string for the given queue name.
// QueueKey returns a redis key for the given queue name.
func QueueKey(qname string) string {
    return QueuePrefix + strings.ToLower(qname)
}

// ProcessedKey returns a redis key string for processed count
// for the given day.
// ProcessedKey returns a redis key for processed count for the given day.
func ProcessedKey(t time.Time) string {
    return processedPrefix + t.UTC().Format("2006-01-02")
}

// FailureKey returns a redis key string for failure count
// for the given day.
// FailureKey returns a redis key for failure count for the given day.
func FailureKey(t time.Time) string {
    return failurePrefix + t.UTC().Format("2006-01-02")
}

// ProcessInfoKey returns a redis key string for process info.
// ProcessInfoKey returns a redis key for process info.
func ProcessInfoKey(hostname string, pid int) string {
    return fmt.Sprintf("%s%s:%d", psPrefix, hostname, pid)
}

// WorkersKey returns a redis key for the workers given hostname and pid.
func WorkersKey(hostname string, pid int) string {
    return fmt.Sprintf("%s%s:%d", workersPrefix, hostname, pid)
}

// TaskMessage is the internal representation of a task with additional metadata fields.
// Serialized data of this type gets written to redis.
type TaskMessage struct {
@@ -87,54 +92,171 @@ type TaskMessage struct {
    Timeout string
}

// ProcessInfo holds information about running background worker process.
// ProcessState holds process level information.
//
// ProcessStates are safe for concurrent use by multiple goroutines.
type ProcessState struct {
    mu sync.Mutex // guards all data fields
    concurrency int
    queues map[string]int
    strictPriority bool
    pid int
    host string
    status PStatus
    started time.Time
    workers map[string]*workerStats
}

// PStatus represents status of a process.
type PStatus int

const (
    // StatusIdle indicates process is in idle state.
    StatusIdle PStatus = iota

    // StatusRunning indicates process is up and processing tasks.
    StatusRunning

    // StatusStopped indicates process is up but not processing new tasks.
    StatusStopped
)

var statuses = []string{
    "idle",
    "running",
    "stopped",
}

func (s PStatus) String() string {
    if StatusIdle <= s && s <= StatusStopped {
        return statuses[s]
    }
    return "unknown status"
}

type workerStats struct {
    msg *TaskMessage
    started time.Time
}

// NewProcessState returns a new instance of ProcessState.
func NewProcessState(host string, pid, concurrency int, queues map[string]int, strict bool) *ProcessState {
    return &ProcessState{
        host: host,
        pid: pid,
        concurrency: concurrency,
        queues: cloneQueueConfig(queues),
        strictPriority: strict,
        status: StatusIdle,
        workers: make(map[string]*workerStats),
    }
}

// SetStatus updates the status of the process.
func (ps *ProcessState) SetStatus(status PStatus) {
    ps.mu.Lock()
    defer ps.mu.Unlock()
    ps.status = status
}

// SetStarted records when the process started processing.
func (ps *ProcessState) SetStarted(t time.Time) {
    ps.mu.Lock()
    defer ps.mu.Unlock()
    ps.started = t
}

// AddWorkerStats records when a worker started and which task it's processing.
func (ps *ProcessState) AddWorkerStats(msg *TaskMessage, started time.Time) {
    ps.mu.Lock()
    defer ps.mu.Unlock()
    ps.workers[msg.ID.String()] = &workerStats{msg, started}
}

// DeleteWorkerStats removes a worker's entry from the process state.
func (ps *ProcessState) DeleteWorkerStats(msg *TaskMessage) {
    ps.mu.Lock()
    defer ps.mu.Unlock()
    delete(ps.workers, msg.ID.String())
}

// Get returns current state of process as a ProcessInfo.
func (ps *ProcessState) Get() *ProcessInfo {
    ps.mu.Lock()
    defer ps.mu.Unlock()
    return &ProcessInfo{
        Host: ps.host,
        PID: ps.pid,
        Concurrency: ps.concurrency,
        Queues: cloneQueueConfig(ps.queues),
        StrictPriority: ps.strictPriority,
        Status: ps.status.String(),
        Started: ps.started,
        ActiveWorkerCount: len(ps.workers),
    }
}

// GetWorkers returns a list of currently running workers' info.
func (ps *ProcessState) GetWorkers() []*WorkerInfo {
    ps.mu.Lock()
    defer ps.mu.Unlock()
    var res []*WorkerInfo
    for _, w := range ps.workers {
        res = append(res, &WorkerInfo{
            Host: ps.host,
            PID: ps.pid,
            ID: w.msg.ID,
            Type: w.msg.Type,
            Queue: w.msg.Queue,
            Payload: clonePayload(w.msg.Payload),
            Started: w.started,
        })
    }
    return res
}

func cloneQueueConfig(qcfg map[string]int) map[string]int {
    res := make(map[string]int)
    for qname, n := range qcfg {
        res[qname] = n
    }
    return res
}

func clonePayload(payload map[string]interface{}) map[string]interface{} {
    res := make(map[string]interface{})
    for k, v := range payload {
        res[k] = v
    }
    return res
}

// ProcessInfo holds information about a running background worker process.
type ProcessInfo struct {
    mu sync.Mutex
    Host string
    PID int
    Concurrency int
    Queues map[string]int
    StrictPriority bool
    PID int
    Host string
    State string
    Status string
    Started time.Time
    ActiveWorkerCount int
}

// NewProcessInfo returns a new instance of ProcessInfo.
func NewProcessInfo(host string, pid, concurrency int, queues map[string]int, strict bool) *ProcessInfo {
    return &ProcessInfo{
        Host: host,
        PID: pid,
        Concurrency: concurrency,
        Queues: queues,
        StrictPriority: strict,
    }
}

// SetState sets the state field of the process info.
func (p *ProcessInfo) SetState(state string) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.State = state
}

// SetStarted sets the started field of the process info.
func (p *ProcessInfo) SetStarted(t time.Time) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.Started = t
}

// IncrActiveWorkerCount increments active worker count by delta.
func (p *ProcessInfo) IncrActiveWorkerCount(delta int) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.ActiveWorkerCount += delta
// WorkerInfo holds information about a running worker.
type WorkerInfo struct {
    Host string
    PID int
    ID xid.ID
    Type string
    Queue string
    Payload map[string]interface{}
    Started time.Time
}

// Cancelations is a collection that holds cancel functions for all in-progress tasks.
//
// Its methods are safe to be used in multiple goroutines.
// Cancelations are safe for concurrent use by multiple goroutines.
type Cancelations struct {
    mu sync.Mutex
    cancelFuncs map[string]context.CancelFunc
@@ -162,10 +284,11 @@ func (c *Cancelations) Delete(id string) {
}

// Get returns a cancel func given an id.
func (c *Cancelations) Get(id string) context.CancelFunc {
func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.cancelFuncs[id]
    fn, ok = c.cancelFuncs[id]
    return fn, ok
}

// GetAll returns all cancel funcs.
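`Cancelations.Get` now uses the comma-ok form, so callers can distinguish "no entry for this id" from a stored cancel func and never invoke a nil function. A standalone sketch of the pattern; the type below is a simplified stand-in for the one in `internal/base`, not the real implementation:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// cancelations mirrors the comma-ok lookup the diff introduces.
type cancelations struct {
	mu          sync.Mutex
	cancelFuncs map[string]context.CancelFunc
}

func (c *cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	fn, ok = c.cancelFuncs[id]
	return fn, ok
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	c := &cancelations{cancelFuncs: map[string]context.CancelFunc{"task1": cancel}}

	// The two-value form makes the "missing id" case explicit.
	if fn, ok := c.Get("task1"); ok {
		fn()
	}
	fmt.Println(ctx.Err()) // context canceled
}
```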
@@ -5,9 +5,14 @@
package base

import (
    "context"
    "math/rand"
    "sync"
    "testing"
    "time"

    "github.com/google/go-cmp/cmp"
    "github.com/rs/xid"
)

func TestQueueKey(t *testing.T) {
@@ -75,34 +80,137 @@ func TestProcessInfoKey(t *testing.T) {
    for _, tc := range tests {
        got := ProcessInfoKey(tc.hostname, tc.pid)
        if got != tc.want {
            t.Errorf("ProcessInfoKey(%s, %d) = %s, want %s", tc.hostname, tc.pid, got, tc.want)
            t.Errorf("ProcessInfoKey(%q, %d) = %q, want %q", tc.hostname, tc.pid, got, tc.want)
        }
    }
}

// Note: Run this test with -race flag to check for data race.
func TestProcessInfoSetter(t *testing.T) {
    pi := NewProcessInfo("localhost", 1234, 8, map[string]int{"default": 1}, false)
func TestWorkersKey(t *testing.T) {
    tests := []struct {
        hostname string
        pid int
        want string
    }{
        {"localhost", 9876, "asynq:workers:localhost:9876"},
        {"127.0.0.1", 1234, "asynq:workers:127.0.0.1:1234"},
    }

    for _, tc := range tests {
        got := WorkersKey(tc.hostname, tc.pid)
        if got != tc.want {
            t.Errorf("WorkersKey(%q, %d) = %q, want = %q", tc.hostname, tc.pid, got, tc.want)
        }
    }
}

// Test for process state being accessed by multiple goroutines.
// Run with -race flag to check for data race.
func TestProcessStateConcurrentAccess(t *testing.T) {
    ps := NewProcessState("127.0.0.1", 1234, 10, map[string]int{"default": 1}, false)
    var wg sync.WaitGroup
    started := time.Now()
    msgs := []*TaskMessage{
        &TaskMessage{ID: xid.New(), Type: "type1", Payload: map[string]interface{}{"user_id": 42}},
        &TaskMessage{ID: xid.New(), Type: "type2"},
        &TaskMessage{ID: xid.New(), Type: "type3"},
    }

    wg.Add(3)

    // Simulate heartbeater calling SetStatus and SetStarted.
    wg.Add(1)
    go func() {
        pi.SetState("runnning")
        wg.Done()
        defer wg.Done()
        ps.SetStarted(started)
        ps.SetStatus(StatusRunning)
    }()

    go func() {
        pi.SetStarted(time.Now())
        pi.IncrActiveWorkerCount(1)
        wg.Done()
    }()
    // Simulate processor starting worker goroutines.
    for _, msg := range msgs {
        wg.Add(1)
        ps.AddWorkerStats(msg, time.Now())
        go func(msg *TaskMessage) {
            defer wg.Done()
            time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
            ps.DeleteWorkerStats(msg)
        }(msg)
    }

    // Simulate heartbeater calling Get and GetWorkers.
    wg.Add(1)
    go func() {
        pi.SetState("stopped")
        wg.Done()
        for i := 0; i < 5; i++ {
            ps.Get()
            ps.GetWorkers()
            time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
        }
    }()

    wg.Wait()

    want := &ProcessInfo{
        Host: "127.0.0.1",
        PID: 1234,
        Concurrency: 10,
        Queues: map[string]int{"default": 1},
        StrictPriority: false,
        Status: "running",
        Started: started,
        ActiveWorkerCount: 0,
    }

    got := ps.Get()
    if diff := cmp.Diff(want, got); diff != "" {
        t.Errorf("(*ProcessState).Get() = %+v, want %+v; (-want,+got)\n%s",
            got, want, diff)
    }
}

// Test for cancelations being accessed by multiple goroutines.
// Run with -race flag to check for data race.
func TestCancelationsConcurrentAccess(t *testing.T) {
    c := NewCancelations()

    _, cancel1 := context.WithCancel(context.Background())
    _, cancel2 := context.WithCancel(context.Background())
    _, cancel3 := context.WithCancel(context.Background())
    var key1, key2, key3 = "key1", "key2", "key3"

    var wg sync.WaitGroup

    wg.Add(1)
    go func() {
        defer wg.Done()
        c.Add(key1, cancel1)
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        c.Add(key2, cancel2)
        time.Sleep(200 * time.Millisecond)
        c.Delete(key2)
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        c.Add(key3, cancel3)
    }()

    wg.Wait()

    _, ok := c.Get(key1)
    if !ok {
        t.Errorf("(*Cancelations).Get(%q) = _, false, want <function>, true", key1)
    }

    _, ok = c.Get(key2)
    if ok {
        t.Errorf("(*Cancelations).Get(%q) = _, true, want <nil>, false", key2)
    }

    funcs := c.GetAll()
    if len(funcs) != 2 {
        t.Errorf("(*Cancelations).GetAll() returns %d functions, want 2", len(funcs))
    }
}
@@ -794,3 +794,39 @@ func (r *RDB) ListProcesses() ([]*base.ProcessInfo, error) {
    }
    return processes, nil
}

// Note: Script also removes stale keys.
var listWorkersCmd = redis.NewScript(`
local res = {}
local now = tonumber(ARGV[1])
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
for _, key in ipairs(keys) do
    local workers = redis.call("HVALS", key)
    for _, w in ipairs(workers) do
        table.insert(res, w)
    end
end
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
return res`)

// ListWorkers returns the list of worker stats.
func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
    res, err := listWorkersCmd.Run(r.client, []string{base.AllWorkers}, time.Now().UTC().Unix()).Result()
    if err != nil {
        return nil, err
    }
    data, err := cast.ToStringSliceE(res)
    if err != nil {
        return nil, err
    }
    var workers []*base.WorkerInfo
    for _, s := range data {
        var w base.WorkerInfo
        err := json.Unmarshal([]byte(s), &w)
        if err != nil {
            continue // skip bad data
        }
        workers = append(workers, &w)
    }
    return workers, nil
}
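Note how `(*RDB).ListWorkers` skips corrupt entries instead of failing the whole call. A runnable sketch of that decode-and-skip pattern with plain `encoding/json`; the struct and sample payloads below are illustrative, not the library's wire format:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type workerInfo struct {
	Type  string
	Queue string
}

func main() {
	raw := []string{
		`{"Type":"email:signup","Queue":"default"}`,
		`not json`, // a bad entry is skipped, not fatal
		`{"Type":"csv:export","Queue":"low"}`,
	}
	var workers []*workerInfo
	for _, s := range raw {
		var w workerInfo
		if err := json.Unmarshal([]byte(s), &w); err != nil {
			continue // skip bad data, mirroring (*RDB).ListWorkers
		}
		workers = append(workers, &w)
	}
	fmt.Println(len(workers)) // 2
}
```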
@@ -2054,32 +2054,51 @@ func TestRemoveQueueError(t *testing.T) {
func TestListProcesses(t *testing.T) {
    r := setup(t)

    ps1 := &base.ProcessInfo{
    started1 := time.Now().Add(-time.Hour)
    ps1 := base.NewProcessState("do.droplet1", 1234, 10, map[string]int{"default": 1}, false)
    ps1.SetStarted(started1)
    ps1.SetStatus(base.StatusRunning)
    info1 := &base.ProcessInfo{
        Concurrency: 10,
        Queues: map[string]int{"default": 1},
        Host: "do.droplet1",
        PID: 1234,
        State: "running",
        Started: time.Now().Add(-time.Hour),
        ActiveWorkerCount: 5,
        Status: "running",
        Started: started1,
        ActiveWorkerCount: 0,
    }

    ps2 := &base.ProcessInfo{
    started2 := time.Now().Add(-2 * time.Hour)
    ps2 := base.NewProcessState("do.droplet2", 9876, 20, map[string]int{"email": 1}, false)
    ps2.SetStarted(started2)
    ps2.SetStatus(base.StatusStopped)
    ps2.AddWorkerStats(h.NewTaskMessage("send_email", nil), time.Now())
    info2 := &base.ProcessInfo{
        Concurrency: 20,
        Queues: map[string]int{"email": 1},
        Host: "do.droplet2",
        PID: 9876,
        State: "stopped",
        Started: time.Now().Add(-2 * time.Hour),
        ActiveWorkerCount: 20,
        Status: "stopped",
        Started: started2,
        ActiveWorkerCount: 1,
    }

    tests := []struct {
        processes []*base.ProcessInfo
        processes []*base.ProcessState
        want []*base.ProcessInfo
    }{
        {processes: []*base.ProcessInfo{}},
        {processes: []*base.ProcessInfo{ps1}},
        {processes: []*base.ProcessInfo{ps1, ps2}},
        {
            processes: []*base.ProcessState{},
            want: []*base.ProcessInfo{},
        },
        {
            processes: []*base.ProcessState{ps1},
            want: []*base.ProcessInfo{info1},
        },
        {
            processes: []*base.ProcessState{ps1, ps2},
            want: []*base.ProcessInfo{info1, info2},
        },
    }

    ignoreOpt := cmpopts.IgnoreUnexported(base.ProcessInfo{})
@@ -2088,7 +2107,7 @@ func TestListProcesses(t *testing.T) {
        h.FlushDB(t, r.client)

        for _, ps := range tc.processes {
            if err := r.WriteProcessInfo(ps, 5*time.Second); err != nil {
            if err := r.WriteProcessState(ps, 5*time.Second); err != nil {
                t.Fatal(err)
            }
        }
@@ -2097,9 +2116,74 @@ func TestListProcesses(t *testing.T) {
        if err != nil {
            t.Errorf("r.ListProcesses returned an error: %v", err)
        }
        if diff := cmp.Diff(tc.processes, got, h.SortProcessInfoOpt, ignoreOpt); diff != "" {
        if diff := cmp.Diff(tc.want, got, h.SortProcessInfoOpt, ignoreOpt); diff != "" {
            t.Errorf("r.ListProcesses returned %v, want %v; (-want,+got)\n%s",
                got, tc.processes, diff)
        }
    }
}

func TestListWorkers(t *testing.T) {
    r := setup(t)

    const (
        host = "127.0.0.1"
        pid = 4567
    )

    m1 := h.NewTaskMessage("send_email", map[string]interface{}{"user_id": "abc123"})
    m2 := h.NewTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/image/file"})
    m3 := h.NewTaskMessage("reindex", map[string]interface{}{})
    t1 := time.Now().Add(-time.Second)
    t2 := time.Now().Add(-10 * time.Second)
    t3 := time.Now().Add(-time.Minute)

    type workerStats struct {
        msg *base.TaskMessage
        started time.Time
    }

    tests := []struct {
        workers []*workerStats
        want []*base.WorkerInfo
    }{
        {
            workers: []*workerStats{
                {m1, t1},
                {m2, t2},
                {m3, t3},
            },
            want: []*base.WorkerInfo{
                {Host: host, PID: pid, ID: m1.ID, Type: m1.Type, Queue: m1.Queue, Payload: m1.Payload, Started: t1},
                {Host: host, PID: pid, ID: m2.ID, Type: m2.Type, Queue: m2.Queue, Payload: m2.Payload, Started: t2},
                {Host: host, PID: pid, ID: m3.ID, Type: m3.Type, Queue: m3.Queue, Payload: m3.Payload, Started: t3},
            },
        },
    }

    for _, tc := range tests {
        h.FlushDB(t, r.client)

        ps := base.NewProcessState(host, pid, 10, map[string]int{"default": 1}, false)

        for _, w := range tc.workers {
            ps.AddWorkerStats(w.msg, w.started)
        }

        err := r.WriteProcessState(ps, time.Minute)
        if err != nil {
            t.Errorf("could not write process state to redis: %v", err)
            continue
        }

        got, err := r.ListWorkers()
        if err != nil {
            t.Errorf("(*RDB).ListWorkers() returned an error: %v", err)
            continue
        }

        if diff := cmp.Diff(tc.want, got, h.SortWorkerInfoOpt); diff != "" {
            t.Errorf("(*RDB).ListWorkers() = %v, want = %v; (-want,+got)\n%s", got, tc.want, diff)
        }
    }
}
@@ -359,56 +359,71 @@ func (r *RDB) forwardSingle(src, dst string) error {
        []string{src, dst}, now).Err()
}

// KEYS[1] -> asynq:ps
// KEYS[2] -> asynq:ps:<host:pid>
// ARGV[1] -> expiration time
// ARGV[2] -> TTL in seconds
// ARGV[3] -> process info
// KEYS[1] -> asynq:ps:<host:pid>
// KEYS[2] -> asynq:ps
// KEYS[3] -> asynq:workers<host:pid>
// keys[4] -> asynq:workers
// ARGV[1] -> expiration time
// ARGV[2] -> TTL in seconds
// ARGV[3] -> process info
// ARGV[4:] -> alternate key-value pair of (worker id, worker data)
// Note: Add key to ZSET with expiration time as score.
// ref: https://github.com/antirez/redis/issues/135#issuecomment-2361996
var writeProcessInfoCmd = redis.NewScript(`
redis.call("ZADD", KEYS[1], ARGV[1], KEYS[2])
redis.call("SETEX", KEYS[2], ARGV[2], ARGV[3])
redis.call("SETEX", KEYS[1], ARGV[2], ARGV[3])
redis.call("ZADD", KEYS[2], ARGV[1], KEYS[1])
redis.call("DEL", KEYS[3])
for i = 4, table.getn(ARGV)-1, 2 do
    redis.call("HSET", KEYS[3], ARGV[i], ARGV[i+1])
end
redis.call("EXPIRE", KEYS[3], ARGV[2])
redis.call("ZADD", KEYS[4], ARGV[1], KEYS[3])
return redis.status_reply("OK")`)

// WriteProcessInfo writes process information to redis with expiration
// set to the value ttl.
func (r *RDB) WriteProcessInfo(ps *base.ProcessInfo, ttl time.Duration) error {
    bytes, err := json.Marshal(ps)
// WriteProcessState writes process state data to redis with expiration set to the value ttl.
func (r *RDB) WriteProcessState(ps *base.ProcessState, ttl time.Duration) error {
    info := ps.Get()
    bytes, err := json.Marshal(info)
    if err != nil {
        return err
    }
    // Note: Add key to ZSET with expiration time as score.
    // ref: https://github.com/antirez/redis/issues/135#issuecomment-2361996
    var args []interface{} // args to the lua script
    exp := time.Now().Add(ttl).UTC()
    key := base.ProcessInfoKey(ps.Host, ps.PID)
    return writeProcessInfoCmd.Run(r.client, []string{base.AllProcesses, key}, float64(exp.Unix()), ttl.Seconds(), string(bytes)).Err()
}

// ReadProcessInfo reads process information stored in redis.
func (r *RDB) ReadProcessInfo(host string, pid int) (*base.ProcessInfo, error) {
    key := base.ProcessInfoKey(host, pid)
    data, err := r.client.Get(key).Result()
    if err != nil {
        return nil, err
    workers := ps.GetWorkers()
    args = append(args, float64(exp.Unix()), ttl.Seconds(), bytes)
    for _, w := range workers {
        bytes, err := json.Marshal(w)
        if err != nil {
            continue // skip bad data
        }
        args = append(args, w.ID.String(), bytes)
    }
    var pinfo base.ProcessInfo
    err = json.Unmarshal([]byte(data), &pinfo)
    if err != nil {
        return nil, err
    }
    return &pinfo, nil
    pkey := base.ProcessInfoKey(info.Host, info.PID)
    wkey := base.WorkersKey(info.Host, info.PID)
    return writeProcessInfoCmd.Run(r.client,
        []string{pkey, base.AllProcesses, wkey, base.AllWorkers},
        args...).Err()
}

// KEYS[1] -> asynq:ps
// KEYS[2] -> asynq:ps:<host:pid>
// KEYS[3] -> asynq:workers
// KEYS[4] -> asynq:workers<host:pid>
var clearProcessInfoCmd = redis.NewScript(`
redis.call("ZREM", KEYS[1], KEYS[2])
redis.call("DEL", KEYS[2])
redis.call("ZREM", KEYS[3], KEYS[4])
redis.call("DEL", KEYS[4])
return redis.status_reply("OK")`)

// ClearProcessInfo deletes process information from redis.
func (r *RDB) ClearProcessInfo(ps *base.ProcessInfo) error {
    key := base.ProcessInfoKey(ps.Host, ps.PID)
    return clearProcessInfoCmd.Run(r.client, []string{base.AllProcesses, key}).Err()
// ClearProcessState deletes process state data from redis.
func (r *RDB) ClearProcessState(ps *base.ProcessState) error {
    info := ps.Get()
    host, pid := info.Host, info.PID
    pkey := base.ProcessInfoKey(host, pid)
    wkey := base.WorkersKey(host, pid)
    return clearProcessInfoCmd.Run(r.client,
        []string{base.AllProcesses, pkey, base.AllWorkers, wkey}).Err()
}

// CancelationPubSub returns a pubsub for cancelation messages.
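The write script above scores each key in the ZSET with its expiration unix time, so readers can treat any member whose score is below "now" as stale without probing per-key TTLs. A pure-Go sketch of that filtering idea (key names are illustrative; in redis this is the ZRANGEBYSCORE/ZREMRANGEBYSCORE pair used by listWorkersCmd earlier):

```go
package main

import (
	"fmt"
	"time"
)

// liveKeys keeps only members whose expiration score has not passed,
// mirroring ZRANGEBYSCORE key now +inf on the process/worker ZSETs.
func liveKeys(scores map[string]int64, now time.Time) []string {
	var res []string
	for key, exp := range scores {
		if exp >= now.Unix() {
			res = append(res, key) // still alive
		}
	}
	return res
}

func main() {
	now := time.Now()
	scores := map[string]int64{
		"asynq:ps:host1:100": now.Add(10 * time.Second).Unix(), // live
		"asynq:ps:host2:200": now.Add(-time.Minute).Unix(),     // expired
	}
	fmt.Println(liveKeys(scores, now)) // [asynq:ps:host1:100]
}
```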
@@ -5,8 +5,9 @@
package rdb

import (
    "encoding/json"
    "fmt"
    "strconv"
    "sync"
    "testing"
    "time"

@@ -741,81 +742,278 @@ func TestCheckAndEnqueue(t *testing.T) {
    }
}

func TestReadWriteClearProcessInfo(t *testing.T) {
func TestWriteProcessState(t *testing.T) {
    r := setup(t)
    pinfo := &base.ProcessInfo{
    host, pid := "localhost", 98765
    queues := map[string]int{"default": 2, "email": 5, "low": 1}

    started := time.Now()
    ps := base.NewProcessState(host, pid, 10, queues, false)
    ps.SetStarted(started)
    ps.SetStatus(base.StatusRunning)
    ttl := 5 * time.Second

    h.FlushDB(t, r.client)

    err := r.WriteProcessState(ps, ttl)
    if err != nil {
        t.Errorf("r.WriteProcessState returned an error: %v", err)
    }

    // Check ProcessInfo was written correctly
    pkey := base.ProcessInfoKey(host, pid)
    data := r.client.Get(pkey).Val()
    var got base.ProcessInfo
    err = json.Unmarshal([]byte(data), &got)
    if err != nil {
        t.Fatalf("could not decode json: %v", err)
    }
    want := base.ProcessInfo{
        Host: "localhost",
        PID: 98765,
        Concurrency: 10,
        Queues: map[string]int{"default": 2, "email": 5, "low": 1},
        PID: 98765,
        Host: "localhost",
        State: "running",
        Started: time.Now(),
        ActiveWorkerCount: 1,
        StrictPriority: false,
        Status: "running",
        Started: started,
        ActiveWorkerCount: 0,
    }
    if diff := cmp.Diff(want, got); diff != "" {
        t.Errorf("persisted ProcessInfo was %v, want %v; (-want,+got)\n%s",
            got, want, diff)
    }
    // Check ProcessInfo TTL was set correctly
    gotTTL := r.client.TTL(pkey).Val()
    timeCmpOpt := cmpopts.EquateApproxTime(time.Second)
    if !cmp.Equal(ttl, gotTTL, timeCmpOpt) {
        t.Errorf("TTL of %q was %v, want %v", pkey, gotTTL, ttl)
    }
    // Check ProcessInfo key was added to the set correctly
    gotProcesses := r.client.ZRange(base.AllProcesses, 0, -1).Val()
    wantProcesses := []string{pkey}
    if diff := cmp.Diff(wantProcesses, gotProcesses); diff != "" {
        t.Errorf("%q contained %v, want %v", base.AllProcesses, gotProcesses, wantProcesses)
    }

    tests := []struct {
        pi *base.ProcessInfo
        ttl time.Duration
    }{
        {pinfo, 5 * time.Second},
    // Check WorkersInfo was written correctly
    wkey := base.WorkersKey(host, pid)
    workerExist := r.client.Exists(wkey).Val()
    if workerExist != 0 {
        t.Errorf("%q key exists", wkey)
    }

    for _, tc := range tests {
        h.FlushDB(t, r.client)

        err := r.WriteProcessInfo(tc.pi, tc.ttl)
        if err != nil {
            t.Errorf("r.WriteProcessInfo returned an error: %v", err)
            continue
        }

        got, err := r.ReadProcessInfo(tc.pi.Host, tc.pi.PID)
        if err != nil {
            t.Errorf("r.ReadProcessInfo returned an error: %v", err)
            continue
        }

        ignoreOpt := cmpopts.IgnoreUnexported(base.ProcessInfo{})
        if diff := cmp.Diff(tc.pi, got, ignoreOpt); diff != "" {
            t.Errorf("r.ReadProcessInfo(%q, %d) = %+v, want %+v; (-want,+got)\n%s",
                tc.pi.Host, tc.pi.PID, got, tc.pi, diff)
        }

        key := base.ProcessInfoKey(tc.pi.Host, tc.pi.PID)
        gotTTL := r.client.TTL(key).Val()
        if !cmp.Equal(tc.ttl, gotTTL, timeCmpOpt) {
            t.Errorf("redis TTL %q returned %v, want %v", key, gotTTL, tc.ttl)
        }

        now := time.Now().UTC()
        allKeys, err := r.client.ZRangeByScore(base.AllProcesses, &redis.ZRangeBy{
            Min: strconv.Itoa(int(now.Unix())),
            Max: "+inf",
        }).Result()
        if err != nil {
            t.Errorf("redis ZRANGEBYSCORE %q %d +inf returned an error: %v",
                base.AllProcesses, now.Unix(), err)
            continue
        }

        wantAllKeys := []string{key}
        if diff := cmp.Diff(wantAllKeys, allKeys); diff != "" {
            t.Errorf("all keys = %v, want %v; (-want,+got)\n%s", allKeys, wantAllKeys, diff)
        }

        if err := r.ClearProcessInfo(tc.pi); err != nil {
            t.Errorf("r.ClearProcessInfo returned an error: %v", err)
            continue
        }

        // 1 means key exists
        if r.client.Exists(key).Val() == 1 {
            t.Errorf("expected %q to be deleted", key)
        }

        if r.client.ZCard(base.AllProcesses).Val() != 0 {
            t.Errorf("expected %q to be empty", base.AllProcesses)
        }

    // Check WorkersInfo key was added to the set correctly
    gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
    wantWorkerKeys := []string{wkey}
    if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
        t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys)
    }
}

func TestWriteProcessStateWithWorkers(t *testing.T) {
    r := setup(t)
    host, pid := "localhost", 98765
    queues := map[string]int{"default": 2, "email": 5, "low": 1}
    concurrency := 10

    started := time.Now().Add(-10 * time.Minute)
    w1Started := time.Now().Add(-time.Minute)
    w2Started := time.Now().Add(-time.Second)
    msg1 := h.NewTaskMessage("send_email", map[string]interface{}{"user_id": "123"})
    msg2 := h.NewTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/imgfile"})
    ps := base.NewProcessState(host, pid, concurrency, queues, false)
    ps.SetStarted(started)
    ps.SetStatus(base.StatusRunning)
    ps.AddWorkerStats(msg1, w1Started)
    ps.AddWorkerStats(msg2, w2Started)
    ttl := 5 * time.Second

    h.FlushDB(t, r.client)

    err := r.WriteProcessState(ps, ttl)
    if err != nil {
        t.Errorf("r.WriteProcessState returned an error: %v", err)
    }

    // Check ProcessInfo was written correctly
    pkey := base.ProcessInfoKey(host, pid)
    data := r.client.Get(pkey).Val()
    var got base.ProcessInfo
    err = json.Unmarshal([]byte(data), &got)
    if err != nil {
        t.Fatalf("could not decode json: %v", err)
    }
    want := base.ProcessInfo{
        Host: host,
        PID: pid,
        Concurrency: concurrency,
        Queues: queues,
        StrictPriority: false,
        Status: "running",
        Started: started,
        ActiveWorkerCount: 2,
    }
    if diff := cmp.Diff(want, got); diff != "" {
        t.Errorf("persisted ProcessInfo was %v, want %v; (-want,+got)\n%s",
            got, want, diff)
    }
    // Check ProcessInfo TTL was set correctly
    gotTTL := r.client.TTL(pkey).Val()
    timeCmpOpt := cmpopts.EquateApproxTime(time.Second)
    if !cmp.Equal(ttl, gotTTL, timeCmpOpt) {
        t.Errorf("TTL of %q was %v, want %v", pkey, gotTTL, ttl)
    }
    // Check ProcessInfo key was added to the set correctly
    gotProcesses := r.client.ZRange(base.AllProcesses, 0, -1).Val()
    wantProcesses := []string{pkey}
    if diff := cmp.Diff(wantProcesses, gotProcesses); diff != "" {
        t.Errorf("%q contained %v, want %v", base.AllProcesses, gotProcesses, wantProcesses)
    }

    // Check WorkersInfo was written correctly
    wkey := base.WorkersKey(host, pid)
    wdata := r.client.HGetAll(wkey).Val()
    if len(wdata) != 2 {
        t.Fatalf("HGETALL %q returned a hash of size %d, want 2", wkey, len(wdata))
    }
    gotWorkers := make(map[string]*base.WorkerInfo)
    for key, val := range wdata {
        var w base.WorkerInfo
        if err := json.Unmarshal([]byte(val), &w); err != nil {
            t.Fatalf("could not unmarshal worker's data: %v", err)
        }
        gotWorkers[key] = &w
    }
    wantWorkers := map[string]*base.WorkerInfo{
        msg1.ID.String(): &base.WorkerInfo{
            Host: host,
            PID: pid,
            ID: msg1.ID,
            Type: msg1.Type,
            Queue: msg1.Queue,
            Payload: msg1.Payload,
            Started: w1Started,
        },
        msg2.ID.String(): &base.WorkerInfo{
            Host: host,
            PID: pid,
            ID: msg2.ID,
            Type: msg2.Type,
            Queue: msg2.Queue,
            Payload: msg2.Payload,
            Started: w2Started,
        },
    }
    if diff := cmp.Diff(wantWorkers, gotWorkers); diff != "" {
        t.Errorf("persisted workers info was %v, want %v; (-want,+got)\n%s",
            gotWorkers, wantWorkers, diff)
    }

    // Check WorkersInfo TTL was set correctly
    gotTTL = r.client.TTL(wkey).Val()
    if !cmp.Equal(ttl, gotTTL, timeCmpOpt) {
        t.Errorf("TTL of %q was %v, want %v", wkey, gotTTL, ttl)
    }
    // Check WorkersInfo key was added to the set correctly
    gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
    wantWorkerKeys := []string{wkey}
    if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
        t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys)
    }
}

func TestClearProcessState(t *testing.T) {
    r := setup(t)
    host, pid := "127.0.0.1", 1234

    h.FlushDB(t, r.client)

    pkey := base.ProcessInfoKey(host, pid)
    wkey := base.WorkersKey(host, pid)
    otherPKey := base.ProcessInfoKey("otherhost", 12345)
    otherWKey := base.WorkersKey("otherhost", 12345)
    // Populate the keys.
    if err := r.client.Set(pkey, "process-info", 0).Err(); err != nil {
        t.Fatal(err)
    }
    if err := r.client.HSet(wkey, "worker-key", "worker-info").Err(); err != nil {
        t.Fatal(err)
    }
    if err := r.client.ZAdd(base.AllProcesses, &redis.Z{Member: pkey}).Err(); err != nil {
        t.Fatal(err)
    }
    if err := r.client.ZAdd(base.AllProcesses, &redis.Z{Member: otherPKey}).Err(); err != nil {
        t.Fatal(err)
    }
    if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Member: wkey}).Err(); err != nil {
        t.Fatal(err)
    }
    if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Member: otherWKey}).Err(); err != nil {
        t.Fatal(err)
    }

    ps := base.NewProcessState(host, pid, 10, map[string]int{"default": 1}, false)

    err := r.ClearProcessState(ps)
    if err != nil {
        t.Fatalf("(*RDB).ClearProcessState failed: %v", err)
    }

    // Check all keys are cleared
    if r.client.Exists(pkey).Val() != 0 {
        t.Errorf("Redis key %q exists", pkey)
    }
    if r.client.Exists(wkey).Val() != 0 {
        t.Errorf("Redis key %q exists", wkey)
    }
    gotProcessKeys := r.client.ZRange(base.AllProcesses, 0, -1).Val()
    wantProcessKeys := []string{otherPKey}
    if diff := cmp.Diff(wantProcessKeys, gotProcessKeys); diff != "" {
        t.Errorf("%q contained %v, want %v", base.AllProcesses, gotProcessKeys, wantProcessKeys)
    }
    gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
    wantWorkerKeys := []string{otherWKey}
    if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
        t.Errorf("%q contained %v, want %v", base.AllWorkers, gotWorkerKeys, wantWorkerKeys)
    }
}

func TestCancelationPubSub(t *testing.T) {
    r := setup(t)

    pubsub, err := r.CancelationPubSub()
    if err != nil {
        t.Fatalf("(*RDB).CancelationPubSub() returned an error: %v", err)
    }

    cancelCh := pubsub.Channel()

    var (
        mu sync.Mutex
        received []string
    )

    go func() {
        for msg := range cancelCh {
            mu.Lock()
            received = append(received, msg.Payload)
            mu.Unlock()
        }
    }()

    publish := []string{"one", "two", "three"}

    for _, msg := range publish {
        r.PublishCancelation(msg)
    }

    // allow for message to reach subscribers.
    time.Sleep(time.Second)

    pubsub.Close()

    mu.Lock()
    if diff := cmp.Diff(publish, received, h.SortStringSliceOpt); diff != "" {
        t.Errorf("subscriber received %v, want %v; (-want,+got)\n%s", received, publish, diff)
    }
    mu.Unlock()
}
32 processor.go
@@ -20,7 +20,7 @@ import (
type processor struct {
    rdb *rdb.RDB

    pinfo *base.ProcessInfo
    ps *base.ProcessState

    handler Handler

@@ -31,6 +31,8 @@ type processor struct {

    retryDelayFunc retryDelayFunc

    errHandler ErrorHandler

    // channel via which to send sync requests to syncer.
    syncRequestCh chan<- *syncRequest

@@ -59,25 +61,28 @@ type processor struct {
type retryDelayFunc func(n int, err error, task *Task) time.Duration

// newProcessor constructs a new processor.
func newProcessor(r *rdb.RDB, pinfo *base.ProcessInfo, fn retryDelayFunc, syncRequestCh chan<- *syncRequest, cancelations *base.Cancelations) *processor {
    qcfg := normalizeQueueCfg(pinfo.Queues)
func newProcessor(r *rdb.RDB, ps *base.ProcessState, fn retryDelayFunc,
    syncCh chan<- *syncRequest, c *base.Cancelations, errHandler ErrorHandler) *processor {
    info := ps.Get()
    qcfg := normalizeQueueCfg(info.Queues)
    orderedQueues := []string(nil)
    if pinfo.StrictPriority {
    if info.StrictPriority {
        orderedQueues = sortByPriority(qcfg)
    }
    return &processor{
        rdb: r,
        pinfo: pinfo,
        ps: ps,
        queueConfig: qcfg,
        orderedQueues: orderedQueues,
        retryDelayFunc: fn,
        syncRequestCh: syncRequestCh,
        cancelations: cancelations,
        syncRequestCh: syncCh,
        cancelations: c,
        errLogLimiter: rate.NewLimiter(rate.Every(3*time.Second), 1),
        sema: make(chan struct{}, pinfo.Concurrency),
        sema: make(chan struct{}, info.Concurrency),
        done: make(chan struct{}),
        abort: make(chan struct{}),
        quit: make(chan struct{}),
        errHandler: errHandler,
        handler: HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
    }
}
@@ -117,11 +122,13 @@ func (p *processor) terminate() {
    p.restore() // move any unfinished tasks back to the queue.
}

func (p *processor) start() {
func (p *processor) start(wg *sync.WaitGroup) {
    // NOTE: The call to "restore" needs to complete before starting
    // the processor goroutine.
    p.restore()
    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            select {
            case <-p.done:
@@ -162,11 +169,11 @@ func (p *processor) exec() {
        p.requeue(msg)
        return
    case p.sema <- struct{}{}: // acquire token
        p.pinfo.IncrActiveWorkerCount(1)
        p.ps.AddWorkerStats(msg, time.Now())
        go func() {
            defer func() {
                p.ps.DeleteWorkerStats(msg)
                <-p.sema /* release token */
                p.pinfo.IncrActiveWorkerCount(-1)
            }()

            resCh := make(chan error, 1)
@@ -189,6 +196,9 @@ func (p *processor) exec() {
            // 2) Retry -> Removes the message from InProgress & Adds the message to Retry
            // 3) Kill -> Removes the message from InProgress & Adds the message to Dead
            if resErr != nil {
                if p.errHandler != nil {
                    p.errHandler.HandleError(task, resErr, msg.Retried, msg.Retry)
                }
                if msg.Retried >= msg.Retry {
                    p.kill(msg, resErr)
                } else {
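The processor now invokes an error handler, when one is set, before deciding between retry and the dead queue. A self-contained sketch of that hook; the types below are simplified stand-ins for asynq's, and the adapter corresponds to the ErrorHandlerFunc used in the tests that follow:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

type task struct{ Type string }

// errorHandlerFunc mirrors the hook's shape: it observes the failure and
// the retry counters, e.g. to report to Honeybadger, Bugsnag, etc.
type errorHandlerFunc func(t *task, err error, retried, maxRetry int)

// handleFailure sketches the processor's branch: call the handler if set,
// then retry or give up based on the retry counters.
func handleFailure(h errorHandlerFunc, t *task, err error, retried, maxRetry int) {
	if h != nil {
		h(t, err, retried, maxRetry)
	}
	if retried >= maxRetry {
		log.Printf("task %q exhausted retries, moving to dead queue", t.Type)
	} else {
		log.Printf("task %q will be retried (%d/%d)", t.Type, retried, maxRetry)
	}
}

func main() {
	reporter := func(t *task, err error, retried, maxRetry int) {
		fmt.Printf("error on %q: %v (attempt %d of %d)\n", t.Type, err, retried, maxRetry)
	}
	handleFailure(reporter, &task{Type: "email:signup"}, errors.New("smtp timeout"), 25, 25)
}
```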
@@ -66,12 +66,13 @@ func TestProcessorSuccess(t *testing.T) {
        processed = append(processed, task)
        return nil
    }
    pi := base.NewProcessInfo("localhost", 1234, 10, defaultQueueConfig, false)
    ps := base.NewProcessState("localhost", 1234, 10, defaultQueueConfig, false)
    cancelations := base.NewCancelations()
    p := newProcessor(rdbClient, pi, defaultDelayFunc, nil, cancelations)
    p := newProcessor(rdbClient, ps, defaultDelayFunc, nil, cancelations, nil)
    p.handler = HandlerFunc(handler)

    p.start()
    var wg sync.WaitGroup
    p.start(&wg)
    for _, msg := range tc.incoming {
        err := rdbClient.Enqueue(msg)
        if err != nil {
@@ -119,24 +120,30 @@ func TestProcessorRetry(t *testing.T) {
    now := time.Now()

    tests := []struct {
        enqueued []*base.TaskMessage // initial default queue state
        incoming []*base.TaskMessage // tasks to be enqueued during run
        delay time.Duration // retry delay duration
        wait time.Duration // wait duration between starting and stopping processor for this test case
        wantRetry []h.ZSetEntry // tasks in retry queue at the end
        wantDead []*base.TaskMessage // tasks in dead queue at the end
        enqueued []*base.TaskMessage // initial default queue state
        incoming []*base.TaskMessage // tasks to be enqueued during run
        delay time.Duration // retry delay duration
        handler Handler // task handler
        wait time.Duration // wait duration between starting and stopping processor for this test case
        wantRetry []h.ZSetEntry // tasks in retry queue at the end
        wantDead []*base.TaskMessage // tasks in dead queue at the end
        wantErrCount int // number of times error handler should be called
    }{
        {
            enqueued: []*base.TaskMessage{m1, m2},
            incoming: []*base.TaskMessage{m3, m4},
            delay: time.Minute,
            wait: time.Second,
            handler: HandlerFunc(func(ctx context.Context, task *Task) error {
                return fmt.Errorf(errMsg)
            }),
            wait: time.Second,
            wantRetry: []h.ZSetEntry{
                {Msg: &r2, Score: float64(now.Add(time.Minute).Unix())},
                {Msg: &r3, Score: float64(now.Add(time.Minute).Unix())},
                {Msg: &r4, Score: float64(now.Add(time.Minute).Unix())},
            },
            wantDead: []*base.TaskMessage{&r1},
            wantDead: []*base.TaskMessage{&r1},
            wantErrCount: 4,
        },
    }

@@ -148,15 +155,22 @@ func TestProcessorRetry(t *testing.T) {
        delayFunc := func(n int, e error, t *Task) time.Duration {
            return tc.delay
        }
        handler := func(ctx context.Context, task *Task) error {
            return fmt.Errorf(errMsg)
        var (
            mu sync.Mutex // guards n
            n int // number of times error handler is called
        )
        errHandler := func(t *Task, err error, retried, maxRetry int) {
            mu.Lock()
            defer mu.Unlock()
            n++
        }
        pi := base.NewProcessInfo("localhost", 1234, 10, defaultQueueConfig, false)
        ps := base.NewProcessState("localhost", 1234, 10, defaultQueueConfig, false)
        cancelations := base.NewCancelations()
        p := newProcessor(rdbClient, pi, delayFunc, nil, cancelations)
        p.handler = HandlerFunc(handler)
        p := newProcessor(rdbClient, ps, delayFunc, nil, cancelations, ErrorHandlerFunc(errHandler))
        p.handler = tc.handler

        p.start()
        var wg sync.WaitGroup
        p.start(&wg)
        for _, msg := range tc.incoming {
            err := rdbClient.Enqueue(msg)
            if err != nil {
@@ -181,6 +195,10 @@ func TestProcessorRetry(t *testing.T) {
        if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
            t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
        }

        if n != tc.wantErrCount {
            t.Errorf("error handler was called %d times, want %d", n, tc.wantErrCount)
        }
    }
}

@@ -212,9 +230,9 @@ func TestProcessorQueues(t *testing.T) {
    }

    for _, tc := range tests {
        pi := base.NewProcessInfo("localhost", 1234, 10, tc.queueCfg, false)
        cancelations := base.NewCancelations()
        p := newProcessor(nil, pi, defaultDelayFunc, nil, cancelations)
        ps := base.NewProcessState("localhost", 1234, 10, tc.queueCfg, false)
        p := newProcessor(nil, ps, defaultDelayFunc, nil, cancelations, nil)
        got := p.queues()
        if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
            t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s",
@@ -280,12 +298,13 @@ func TestProcessorWithStrictPriority(t *testing.T) {
        "low": 1,
    }
    // Note: Set concurrency to 1 to make sure tasks are processed one at a time.
    pi := base.NewProcessInfo("localhost", 1234, 1 /*concurrency */, queueCfg, true /* strict */)
    cancelations := base.NewCancelations()
    p := newProcessor(rdbClient, pi, defaultDelayFunc, nil, cancelations)
    ps := base.NewProcessState("localhost", 1234, 1 /* concurrency */, queueCfg, true /*strict*/)
    p := newProcessor(rdbClient, ps, defaultDelayFunc, nil, cancelations, nil)
    p.handler = HandlerFunc(handler)

    p.start()
    var wg sync.WaitGroup
    p.start(&wg)
    time.Sleep(tc.wait)
    p.terminate()
@@ -5,6 +5,7 @@
package asynq

import (
    "sync"
    "time"

    "github.com/hibiken/asynq/internal/rdb"
@@ -43,8 +44,10 @@ func (s *scheduler) terminate() {
}

// start starts the "scheduler" goroutine.
func (s *scheduler) start() {
func (s *scheduler) start(wg *sync.WaitGroup) {
    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            select {
            case <-s.done:
@@ -5,6 +5,7 @@
package asynq

import (
    "sync"
    "testing"
    "time"

@@ -69,7 +70,8 @@ func TestScheduler(t *testing.T) {
    h.SeedRetryQueue(t, r, tc.initRetry) // initialize retry queue
    h.SeedEnqueuedQueue(t, r, tc.initQueue) // initialize default queue

    s.start()
    var wg sync.WaitGroup
    s.start(&wg)
    time.Sleep(tc.wait)
    s.terminate()
139 servemux.go (new file)
@@ -0,0 +1,139 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
    "context"
    "fmt"
    "sort"
    "strings"
    "sync"
)

// ServeMux is a multiplexer for asynchronous tasks.
// It matches the type of each task against a list of registered patterns
// and calls the handler for the pattern that most closely matches the
// task's type name.
//
// Longer patterns take precedence over shorter ones, so that if there are
// handlers registered for both "images" and "images:thumbnails",
// the latter handler will be called for tasks with a type name beginning with
// "images:thumbnails" and the former will receive tasks with type name beginning
// with "images".
type ServeMux struct {
    mu sync.RWMutex
    m map[string]muxEntry
    es []muxEntry // slice of entries sorted from longest to shortest.
}

type muxEntry struct {
    h Handler
    pattern string
}

// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux {
    return new(ServeMux)
}

// ProcessTask dispatches the task to the handler whose
// pattern most closely matches the task type.
func (mux *ServeMux) ProcessTask(ctx context.Context, task *Task) error {
    h, _ := mux.Handler(task)
    return h.ProcessTask(ctx, task)
}

// Handler returns the handler to use for the given task.
// It always returns a non-nil handler.
//
// Handler also returns the registered pattern that matches the task.
//
// If there is no registered handler that applies to the task,
// Handler returns a 'not found' handler which returns an error.
func (mux *ServeMux) Handler(t *Task) (h Handler, pattern string) {
    mux.mu.RLock()
    defer mux.mu.RUnlock()

    h, pattern = mux.match(t.Type)
    if h == nil {
        h, pattern = NotFoundHandler(), ""
    }
    return h, pattern
}

// Find a handler on a handler map given a typename string.
// Most-specific (longest) pattern wins.
func (mux *ServeMux) match(typename string) (h Handler, pattern string) {
    // Check for exact match first.
    v, ok := mux.m[typename]
    if ok {
        return v.h, v.pattern
    }

    // Check for longest valid match.
    // mux.es contains all patterns from longest to shortest.
    for _, e := range mux.es {
        if strings.HasPrefix(typename, e.pattern) {
            return e.h, e.pattern
        }
    }
    return nil, ""
}

// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
    mux.mu.Lock()
    defer mux.mu.Unlock()

    if pattern == "" {
        panic("asynq: invalid pattern")
    }
    if handler == nil {
        panic("asynq: nil handler")
    }
    if _, exist := mux.m[pattern]; exist {
        panic("asynq: multiple registrations for " + pattern)
    }

    if mux.m == nil {
        mux.m = make(map[string]muxEntry)
    }
    e := muxEntry{h: handler, pattern: pattern}
    mux.m[pattern] = e
    mux.es = appendSorted(mux.es, e)
}

func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
    n := len(es)
    i := sort.Search(n, func(i int) bool {
        return len(es[i].pattern) < len(e.pattern)
    })
    if i == n {
        return append(es, e)
    }
    // we now know that i points at where we want to insert.
    es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
    copy(es[i+1:], es[i:]) // shift shorter entries down.
    es[i] = e
    return es
}

// HandleFunc registers the handler function for the given pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(context.Context, *Task) error) {
    if handler == nil {
        panic("asynq: nil handler")
    }
    mux.Handle(pattern, HandlerFunc(handler))
}

// NotFound returns an error indicating that the handler was not found for the given task.
func NotFound(ctx context.Context, task *Task) error {
    return fmt.Errorf("handler not found for task %q", task.Type)
}

// NotFoundHandler returns a simple task handler that returns a ``not found`` error.
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
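A usage sketch for the new ServeMux: the longest registered pattern wins, so "email:signup" takes precedence over the catch-all "email:" handler, and unmatched type names fall through to the not-found handler. Handler bodies and type names here are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hibiken/asynq"
)

func main() {
	mux := asynq.NewServeMux()
	mux.HandleFunc("email:", func(ctx context.Context, t *asynq.Task) error {
		fmt.Println("default email handler:", t.Type)
		return nil
	})
	mux.HandleFunc("email:signup", func(ctx context.Context, t *asynq.Task) error {
		fmt.Println("signup handler:", t.Type)
		return nil
	})

	// "email:daily" has no exact match, so it falls back to "email:".
	_ = mux.ProcessTask(context.Background(), asynq.NewTask("email:daily", nil))
	// "email:signup" matches the longer, more specific pattern.
	_ = mux.ProcessTask(context.Background(), asynq.NewTask("email:signup", nil))
}
```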
116 servemux_test.go (new file)
@@ -0,0 +1,116 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
    "context"
    "testing"
)

var called string

// makeFakeHandler returns a handler that updates the global called variable
// to the given identity.
func makeFakeHandler(identity string) Handler {
    return HandlerFunc(func(ctx context.Context, t *Task) error {
        called = identity
        return nil
    })
}

// A list of pattern, handler pairs that are registered with mux.
var serveMuxRegister = []struct {
    pattern string
    h Handler
}{
    {"email:", makeFakeHandler("default email handler")},
    {"email:signup", makeFakeHandler("signup email handler")},
    {"csv:export", makeFakeHandler("csv export handler")},
}

var serveMuxTests = []struct {
    typename string // task's type name
    want string // identifier of the handler that should be called
}{
    {"email:signup", "signup email handler"},
    {"csv:export", "csv export handler"},
    {"email:daily", "default email handler"},
}

func TestServeMux(t *testing.T) {
    mux := NewServeMux()
    for _, e := range serveMuxRegister {
        mux.Handle(e.pattern, e.h)
    }

    for _, tc := range serveMuxTests {
        called = "" // reset to zero value

        task := NewTask(tc.typename, nil)
        if err := mux.ProcessTask(context.Background(), task); err != nil {
            t.Fatal(err)
        }

        if called != tc.want {
            t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type, tc.want)
        }
    }
}

func TestServeMuxRegisterNilHandler(t *testing.T) {
    defer func() {
        if err := recover(); err == nil {
            t.Error("expected call to mux.HandleFunc to panic")
        }
    }()

    mux := NewServeMux()
    mux.HandleFunc("email:signup", nil)
}

func TestServeMuxRegisterEmptyPattern(t *testing.T) {
    defer func() {
        if err := recover(); err == nil {
            t.Error("expected call to mux.HandleFunc to panic")
        }
    }()

    mux := NewServeMux()
    mux.Handle("", makeFakeHandler("email"))
}

func TestServeMuxRegisterDuplicatePattern(t *testing.T) {
    defer func() {
        if err := recover(); err == nil {
            t.Error("expected call to mux.HandleFunc to panic")
        }
    }()

    mux := NewServeMux()
    mux.Handle("email", makeFakeHandler("email"))
    mux.Handle("email", makeFakeHandler("email:default"))
}

var notFoundTests = []struct {
    typename string // task's type name
}{
    {"image:minimize"},
    {"csv:"}, // registered patterns match the task's type prefix, not the other way around.
}

func TestServeMuxNotFound(t *testing.T) {
    mux := NewServeMux()
    for _, e := range serveMuxRegister {
        mux.Handle(e.pattern, e.h)
    }

    for _, tc := range notFoundTests {
        task := NewTask(tc.typename, nil)
        err := mux.ProcessTask(context.Background(), task)
        if err == nil {
            t.Errorf("ProcessTask did not return error for task %q, should return 'not found' error", task.Type)
        }
    }
}
@@ -5,6 +5,8 @@
package asynq

import (
	"sync"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

@@ -33,14 +35,16 @@ func (s *subscriber) terminate() {
	s.done <- struct{}{}
}

func (s *subscriber) start() {
func (s *subscriber) start(wg *sync.WaitGroup) {
	pubsub, err := s.rdb.CancelationPubSub()
	cancelCh := pubsub.Channel()
	if err != nil {
		logger.error("cannot subscribe to cancelation channel: %v", err)
		return
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-s.done:
@@ -48,8 +52,8 @@ func (s *subscriber) start() {
				logger.info("Subscriber done")
				return
			case msg := <-cancelCh:
				cancel := s.cancelations.Get(msg.Payload)
				if cancel != nil {
				cancel, ok := s.cancelations.Get(msg.Payload)
				if ok {
					cancel()
				}
			}
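The signature change above is part of a broader refactor: each background component now registers itself on a caller-owned sync.WaitGroup so shutdown can block until every goroutine has exited. A minimal sketch of the pattern with a made-up component (not the library's code):

```go
package main

import (
	"fmt"
	"sync"
)

type worker struct {
	done chan struct{}
}

// start launches the component's goroutine and registers it on wg,
// mirroring the start(wg *sync.WaitGroup) signature introduced above.
func (w *worker) start(wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		<-w.done // block until terminate is called
		fmt.Println("worker done")
	}()
}

func (w *worker) terminate() {
	w.done <- struct{}{}
}

func main() {
	var wg sync.WaitGroup
	w := &worker{done: make(chan struct{})}
	w.start(&wg)
	w.terminate()
	wg.Wait() // returns only after the goroutine has exited
}
```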
@@ -5,6 +5,7 @@
package asynq

import (
	"sync"
	"testing"
	"time"

@@ -26,15 +27,19 @@ func TestSubscriber(t *testing.T) {
	}

	for _, tc := range tests {
		var mu sync.Mutex
		called := false
		fakeCancelFunc := func() {
			mu.Lock()
			defer mu.Unlock()
			called = true
		}
		cancelations := base.NewCancelations()
		cancelations.Add(tc.registeredID, fakeCancelFunc)

		subscriber := newSubscriber(rdbClient, cancelations)
		subscriber.start()
		var wg sync.WaitGroup
		subscriber.start(&wg)

		if err := rdbClient.PublishCancelation(tc.publishID); err != nil {
			subscriber.terminate()
@@ -44,6 +49,7 @@ func TestSubscriber(t *testing.T) {
		// allow for redis to publish message
		time.Sleep(time.Second)

		mu.Lock()
		if called != tc.wantCalled {
			if tc.wantCalled {
				t.Errorf("fakeCancelFunc was not called, want the function to be called")
@@ -51,6 +57,7 @@ func TestSubscriber(t *testing.T) {
				t.Errorf("fakeCancelFunc was called, want the function to not be called")
			}
		}
		mu.Unlock()

		subscriber.terminate()
	}
@@ -5,6 +5,7 @@
package asynq

import (
	"sync"
	"time"
)

@@ -39,8 +40,10 @@ func (s *syncer) terminate() {
	s.done <- struct{}{}
}

func (s *syncer) start() {
func (s *syncer) start(wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		var requests []*syncRequest
		for {
			select {
@@ -5,10 +5,11 @@
package asynq

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/go-redis/redis/v7"
	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
@@ -27,7 +28,8 @@ func TestSyncer(t *testing.T) {
	const interval = time.Second
	syncRequestCh := make(chan *syncRequest)
	syncer := newSyncer(syncRequestCh, interval)
	syncer.start()
	var wg sync.WaitGroup
	syncer.start(&wg)
	defer syncer.terminate()

	for _, msg := range inProgress {
@@ -48,53 +50,43 @@ func TestSyncer(t *testing.T) {
}

func TestSyncerRetry(t *testing.T) {
	inProgress := []*base.TaskMessage{
		h.NewTaskMessage("send_email", nil),
		h.NewTaskMessage("reindex", nil),
		h.NewTaskMessage("gen_thumbnail", nil),
	}
	goodClient := setup(t)
	h.SeedInProgressQueue(t, goodClient, inProgress)

	// Simulate the situation where redis server is down
	// by connecting to a wrong port.
	badClient := redis.NewClient(&redis.Options{
		Addr: "localhost:6390",
	})
	rdbClient := rdb.NewRDB(badClient)

	const interval = time.Second
	syncRequestCh := make(chan *syncRequest)
	syncer := newSyncer(syncRequestCh, interval)
	syncer.start()

	var wg sync.WaitGroup
	syncer.start(&wg)
	defer syncer.terminate()

	for _, msg := range inProgress {
		m := msg
		syncRequestCh <- &syncRequest{
			fn: func() error {
				return rdbClient.Done(m)
			},
	var (
		mu      sync.Mutex
		counter int
	)

	// Increment the counter for each call.
	// Initial call will fail and second call will succeed.
	requestFunc := func() error {
		mu.Lock()
		defer mu.Unlock()
		if counter == 0 {
			counter++
			return fmt.Errorf("zero")
		}
		counter++
		return nil
	}

	time.Sleep(2 * interval) // ensure that syncer runs at least once

	// Sanity check to ensure that message was not successfully deleted
	// from in-progress list.
	gotInProgress := h.GetInProgressMessages(t, goodClient)
	if l := len(gotInProgress); l != len(inProgress) {
		t.Errorf("%q has length %d; want %d", base.InProgressQueue, l, len(inProgress))
	syncRequestCh <- &syncRequest{
		fn:     requestFunc,
		errMsg: "error",
	}

	// FIXME: This assignment introduces data race and running the test with -race will fail.
	// simulate failover.
	rdbClient = rdb.NewRDB(goodClient)
	// allow syncer to retry
	time.Sleep(3 * interval)

	time.Sleep(2 * interval) // ensure that syncer runs at least once

	gotInProgress = h.GetInProgressMessages(t, goodClient)
	if l := len(gotInProgress); l != 0 {
		t.Errorf("%q has length %d; want 0", base.InProgressQueue, l)
	mu.Lock()
	if counter != 2 {
		t.Errorf("counter = %d, want 2", counter)
	}
	mu.Unlock()
}
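The rewritten test asserts on a retry counter instead of Redis state. As a rough mental model of the behavior under test, the syncer can be thought of as a loop that buffers failed requests and re-attempts them on every interval tick; the program below is an illustrative stand-in, not the library's implementation:

```go
package main

import (
	"fmt"
	"time"
)

// retryLoop keeps failed requests and re-attempts them on every tick,
// mirroring what TestSyncerRetry asserts: the first call fails, a later
// tick retries and succeeds.
func retryLoop(done <-chan struct{}, requestCh <-chan func() error, interval time.Duration) {
	var pending []func() error
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case fn := <-requestCh:
			pending = append(pending, fn)
		case <-ticker.C:
			var failed []func() error
			for _, fn := range pending {
				if err := fn(); err != nil {
					failed = append(failed, fn) // keep for the next tick
				}
			}
			pending = failed
		}
	}
}

func main() {
	done := make(chan struct{})
	requestCh := make(chan func() error)
	go retryLoop(done, requestCh, 100*time.Millisecond)

	calls := 0
	requestCh <- func() error {
		calls++
		if calls == 1 {
			return fmt.Errorf("transient failure")
		}
		fmt.Println("succeeded on call", calls)
		return nil
	}

	time.Sleep(350 * time.Millisecond) // let the loop retry
	close(done)
}
```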
@@ -73,7 +73,7 @@ func ps(cmd *cobra.Command, args []string) {
	printRows := func(w io.Writer, tmpl string) {
		for _, ps := range processes {
			fmt.Fprintf(w, tmpl,
				ps.Host, ps.PID, ps.State,
				ps.Host, ps.PID, ps.Status,
				fmt.Sprintf("%d/%d", ps.ActiveWorkerCount, ps.Concurrency),
				formatQueues(ps.Queues), timeAgo(ps.Started))
		}
75 tools/asynqmon/cmd/workers.go Normal file
@@ -0,0 +1,75 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package cmd

import (
	"fmt"
	"io"
	"os"
	"sort"

	"github.com/go-redis/redis/v7"
	"github.com/hibiken/asynq/internal/rdb"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// workersCmd represents the workers command
var workersCmd = &cobra.Command{
	Use:   "workers",
	Short: "Shows all running workers information",
	Long: `Workers (asynqmon workers) will show all running workers information.

The command shows the following for each worker:
* Process in which the worker is running
* ID of the task worker is processing
* Type of the task worker is processing
* Payload of the task worker is processing
* Queue that the task was pulled from.
* Time the worker started processing the task`,
	Args: cobra.NoArgs,
	Run:  workers,
}

func init() {
	rootCmd.AddCommand(workersCmd)
}

func workers(cmd *cobra.Command, args []string) {
	r := rdb.NewRDB(redis.NewClient(&redis.Options{
		Addr:     viper.GetString("uri"),
		DB:       viper.GetInt("db"),
		Password: viper.GetString("password"),
	}))

	workers, err := r.ListWorkers()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	if len(workers) == 0 {
		fmt.Println("No workers")
		return
	}

	// sort by started timestamp or ID.
	sort.Slice(workers, func(i, j int) bool {
		x, y := workers[i], workers[j]
		if x.Started != y.Started {
			return x.Started.Before(y.Started)
		}
		return x.ID.String() < y.ID.String()
	})

	cols := []string{"Process", "ID", "Type", "Payload", "Queue", "Started"}
	printRows := func(w io.Writer, tmpl string) {
		for _, wk := range workers {
			fmt.Fprintf(w, tmpl,
				fmt.Sprintf("%s:%d", wk.Host, wk.PID), wk.ID, wk.Type, wk.Payload, wk.Queue, timeAgo(wk.Started))
		}
	}
	printTable(cols, printRows)
}
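The new subcommand reads its connection settings from the same viper keys the other asynqmon commands use (uri, db, password). Assuming the root command's usual flag bindings for those keys, invocation would look something like:

```sh
$ asynqmon workers --uri 127.0.0.1:6379 --db 0
```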
14 tools/go.mod Normal file
@@ -0,0 +1,14 @@
module github.com/hibiken/asynq/tools

go 1.13

require (
	github.com/go-redis/redis/v7 v7.2.0
	github.com/hibiken/asynq v0.4.0
	github.com/mitchellh/go-homedir v1.1.0
	github.com/rs/xid v1.2.1
	github.com/spf13/cobra v0.0.5
	github.com/spf13/viper v1.6.2
)

replace github.com/hibiken/asynq => ./..
195 tools/go.sum Normal file
@@ -0,0 +1,195 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
github.com/go-redis/redis/v7 v7.0.0-beta.4/go.mod h1:xhhSbUMTsleRPur+Vgx9sUHtyN33bdjxY+9/0n9Ig8s=
github.com/go-redis/redis/v7 v7.1.0 h1:I4C4a8UGbFejiVjtYVTRVOiMIJ5pm5Yru6ibvDX/OS0=
github.com/go-redis/redis/v7 v7.1.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hibiken/asynq v0.4.0 h1:NvAfYX0DRe04WgGMKRg5oX7bs6ktv2fu9YwB6O356FI=
github.com/hibiken/asynq v0.4.0/go.mod h1:dtrVkxCsGPVhVNHMDXAH7lFq64kbj43+G6lt4FQZfW4=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4=
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.6.0/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=