mirror of https://github.com/hibiken/asynq.git synced 2025-10-20 09:16:12 +08:00

Compare commits


24 Commits

Author SHA1 Message Date
Ken Hibino
04702ddfd2 Change ErrorHandler function signature 2020-07-04 05:53:50 -07:00
Ken Hibino
6705f7c27a Return Result struct to caller of Enqueue 2020-07-03 21:49:53 -07:00
Ken Hibino
e27ae0d33a Replace github.com/rs/xid with github.com/google/uuid 2020-07-02 06:38:13 -07:00
Ken Hibino
6cd0ab65a3 Add version command to CLI 2020-06-29 20:59:15 -07:00
Ken Hibino
83c9d5ae94 Add migrate command to CLI
The command converts all messages in redis to be compatible for asynq v0.10.0
2020-06-29 06:11:47 -07:00
Ken Hibino
7eebbf181e Update docs 2020-06-29 06:11:47 -07:00
Ken Hibino
7b1770da96 Minor code cleanup 2020-06-29 06:11:47 -07:00
Ken Hibino
e2c5882368 Use int64 type for Timeout and Deadline in TaskMessage 2020-06-29 06:11:47 -07:00
Ken Hibino
50df107ace Clean up processor test 2020-06-29 06:11:47 -07:00
Ken Hibino
9699d196e5 Add recoverer 2020-06-29 06:11:47 -07:00
Ken Hibino
1c5f7a791b Add RDB.ListDeadlineExceeded 2020-06-29 06:11:47 -07:00
Ken Hibino
232efe8279 Fix processor 2020-06-29 06:11:47 -07:00
Ken Hibino
ef4a4a8334 Add deadline to syncRequest
- syncer will drop a request if its deadline has been exceeded
2020-06-29 06:11:47 -07:00
Ken Hibino
65e17a3469 Update processor to adapt for deadlines set change
- Processor dequeues tasks only when it's available to process
- Processor retries a task when its context's Done channel is closed
2020-06-29 06:11:47 -07:00
Ken Hibino
88d94a2a9d Update RDB.Requeue to remove message from deadlines set 2020-06-29 06:11:47 -07:00
Ken Hibino
7433b94aac Update RDB.Dequeue to return deadline as time.Time 2020-06-29 06:11:47 -07:00
Ken Hibino
08ac7793ab Update RDB.Kill to remove message from deadlines set 2020-06-29 06:11:47 -07:00
Ken Hibino
02b653df72 Update RDB.Retry to remove message from deadlines set 2020-06-29 06:11:47 -07:00
Ken Hibino
bee784c052 Update RDB.Done to remove message from deadlines set 2020-06-29 06:11:47 -07:00
Ken Hibino
4ea58052f8 Update RDB.Dequeue to return message and deadline 2020-06-29 06:11:47 -07:00
Ken Hibino
5afb4861a5 Add task message to deadlines set on dequeue
Updated dequeueCmd to decode the message and compute its deadline and add
the message to the Deadline set.
2020-06-29 06:11:47 -07:00
Ken Hibino
68e6b379fc Use default timeout of 30mins if both timeout and deadline are not
provided
2020-06-29 06:11:47 -07:00
Ken Hibino
0e70a14899 Change TaskMessage Timeout and Deadline to int
* This change breaks existing tasks in Redis
2020-06-29 06:11:47 -07:00
Ken Hibino
f01c7b8e66 Add redis key for deadlines in base package 2020-06-29 06:11:47 -07:00
76 changed files with 4530 additions and 11636 deletions

View File

@@ -1,82 +0,0 @@
# This workflow runs benchmarks against the current branch,
# compares them to benchmarks against master,
# and uploads the results as an artifact.
name: benchstat
on: [pull_request]
jobs:
  incoming:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Benchmark
        run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... | tee -a new.txt
      - name: Upload Benchmark
        uses: actions/upload-artifact@v2
        with:
          name: bench-incoming
          path: new.txt
  current:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          ref: master
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Benchmark
        run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... | tee -a old.txt
      - name: Upload Benchmark
        uses: actions/upload-artifact@v2
        with:
          name: bench-current
          path: old.txt
  benchstat:
    needs: [incoming, current]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Install benchstat
        run: go get -u golang.org/x/perf/cmd/benchstat
      - name: Download Incoming
        uses: actions/download-artifact@v2
        with:
          name: bench-incoming
      - name: Download Current
        uses: actions/download-artifact@v2
        with:
          name: bench-current
      - name: Benchstat Results
        run: benchstat old.txt new.txt | tee -a benchstat.txt
      - name: Upload benchstat results
        uses: actions/upload-artifact@v2
        with:
          name: benchstat
          path: benchstat.txt

View File

@@ -1,35 +0,0 @@
name: build
on: [push, pull_request]
jobs:
  build:
    strategy:
      matrix:
        os: [ubuntu-latest]
        go-version: [1.13.x, 1.14.x, 1.15.x]
    runs-on: ${{ matrix.os }}
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Build
        run: go build -v ./...
      - name: Test
        run: go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
      - name: Benchmark Test
        run: go test -run=^$ -bench=. -loglevel=debug ./...
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1

13
.travis.yml Normal file
View File

@@ -0,0 +1,13 @@
language: go
go_import_path: github.com/hibiken/asynq
git:
  depth: 1
go: [1.13.x, 1.14.x]
script:
- go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
- go test -run=XXX -bench=. -loglevel=debug ./...
services:
- redis-server
after_success:
- bash ./.travis/benchcmp.sh
- bash <(curl -s https://codecov.io/bash)

18
.travis/benchcmp.sh Executable file
View File

@@ -0,0 +1,18 @@
if [ "${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" != "master" ]; then
REMOTE_URL="$(git config --get remote.origin.url)";
cd ${TRAVIS_BUILD_DIR}/.. && \
git clone ${REMOTE_URL} "${TRAVIS_REPO_SLUG}-bench" && \
cd "${TRAVIS_REPO_SLUG}-bench" && \
# Benchmark master
git checkout master && \
go test -run=XXX -bench=. ./... > master.txt && \
# Benchmark feature branch
git checkout ${TRAVIS_COMMIT} && \
go test -run=XXX -bench=. ./... > feature.txt && \
# compare two benchmarks
go get -u golang.org/x/tools/cmd/benchcmp && \
benchcmp master.txt feature.txt;
fi

View File

@@ -7,155 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.15.0] - 2021-01-31
**IMPORTANT**: All `Inspector` related code is moved to subpackage "github.com/hibiken/asynq/inspeq"
### Changed
- `Inspector` related code is moved to subpackage "github.com/hibiken/asynq/inspeq".
- `RedisConnOpt` interface has changed slightly. If you have been passing `RedisClientOpt`, `RedisFailoverClientOpt`, or `RedisClusterClientOpt` as a pointer,
update your code to pass it as a value.
- `ErrorMsg` field in `RetryTask` and `ArchivedTask` was renamed to `LastError`.
### Added
- `MaxRetry`, `Retried`, `LastError` fields were added to all task types returned from `Inspector`.
- `MemoryUsage` field was added to `QueueStats`.
- `DeleteAllPendingTasks`, `ArchiveAllPendingTasks` were added to `Inspector`
- `DeleteTaskByKey` and `ArchiveTaskByKey` now support deleting/archiving `PendingTask`.
- asynq CLI now supports deleting/archiving pending tasks.
## [0.14.1] - 2021-01-19
### Fixed
- `go.mod` file for CLI
## [0.14.0] - 2021-01-14
**IMPORTANT**: Please run the `asynq migrate` command to migrate from previous versions.
### Changed
- Renamed `DeadTask` to `ArchivedTask`.
- Renamed the operation `Kill` to `Archive` in `Inspector`.
- Print stack trace when Handler panics.
- Include a file name and a line number in the error message when recovering from a panic.
### Added
- `DefaultRetryDelayFunc` is now a public API, which can be used in the custom `RetryDelayFunc`.
- `SkipRetry` error is added to be used as a return value from `Handler`.
- `Servers` method is added to `Inspector`
- `CancelActiveTask` method is added to `Inspector`.
- `ListSchedulerEnqueueEvents` method is added to `Inspector`.
- `SchedulerEntries` method is added to `Inspector`.
- `DeleteQueue` method is added to `Inspector`.
## [0.13.1] - 2020-11-22
### Fixed
- Fixed processor to wait for the specified time duration before forcefully shutting down workers.
## [0.13.0] - 2020-10-13
### Added
- `Scheduler` type is added to enable periodic tasks. See the godoc for its APIs and [wiki](https://github.com/hibiken/asynq/wiki/Periodic-Tasks) for the getting-started guide.
### Changed
- interface `Option` has changed. See the godoc for the new interface.
This change would have no impact as long as you are using exported functions (e.g. `MaxRetry`, `Queue`, etc)
to create `Option`s.
### Added
- `Payload.String() string` method is added
- `Payload.MarshalJSON() ([]byte, error)` method is added
## [0.12.0] - 2020-09-12
**IMPORTANT**: If you are upgrading from a previous version, please install the latest version of the CLI `go get -u github.com/hibiken/asynq/tools/asynq` and run `asynq migrate` command. No process should be writing to Redis while you run the migration command.
## The semantics of queue have changed
Previously, we called tasks that are ready to be processed _"Enqueued tasks"_, and other tasks that are scheduled to be processed in the future _"Scheduled tasks"_, etc.
We changed the semantics of _"Enqueue"_ slightly: all tasks that the client pushes to Redis are _Enqueued_ to a queue. Within a queue, tasks will transition from one state to another.
Possible task states are:
- `Pending`: task is ready to be processed (previously called "Enqueued")
- `Active`: task is currently being processed (previously called "InProgress")
- `Scheduled`: task is scheduled to be processed in the future
- `Retry`: task failed to be processed and will be retried again in the future
- `Dead`: task has exhausted all of its retries and is stored for manual inspection purposes
**This change in semantics is reflected in the new `Inspector` API and CLI commands.**
---
### Changed
#### `Client`
Use `ProcessIn` or `ProcessAt` option to schedule a task instead of `EnqueueIn` or `EnqueueAt`.
| Previously | v0.12.0 |
| --------------------------- | ------------------------------------------ |
| `client.EnqueueAt(t, task)` | `client.Enqueue(task, asynq.ProcessAt(t))` |
| `client.EnqueueIn(d, task)` | `client.Enqueue(task, asynq.ProcessIn(d))` |
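For example, migrating an existing scheduling call might look like the following sketch (the task type, payload, and Redis address are illustrative placeholders):

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	c := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer c.Close()

	t := asynq.NewTask("email:welcome", map[string]interface{}{"user_id": 42})

	// Before v0.12.0:
	//   c.EnqueueAt(time.Now().Add(time.Hour), t)
	//   c.EnqueueIn(24*time.Hour, t)

	// From v0.12.0, pass a ProcessAt or ProcessIn option to Enqueue instead.
	if _, err := c.Enqueue(t, asynq.ProcessAt(time.Now().Add(time.Hour))); err != nil {
		log.Fatalf("could not schedule task: %v", err)
	}
	if _, err := c.Enqueue(t, asynq.ProcessIn(24*time.Hour)); err != nil {
		log.Fatalf("could not schedule task: %v", err)
	}
}
```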
#### `Inspector`
All Inspector methods are scoped to a queue, and the methods take `qname (string)` as the first argument.
`EnqueuedTask` is renamed to `PendingTask`, along with its corresponding methods.
`InProgressTask` is renamed to `ActiveTask`, along with its corresponding methods.
The verb "Enqueue" in method names is replaced by "Run" (e.g. `EnqueueAllScheduledTasks` --> `RunAllScheduledTasks`).
#### `CLI`
CLI commands are restructured to use subcommands. Commands are organized into a few management commands:
To view details on any command, use `asynq help <command> <subcommand>`.
- `asynq stats`
- `asynq queue [ls inspect history rm pause unpause]`
- `asynq task [ls cancel delete kill run delete-all kill-all run-all]`
- `asynq server [ls]`
### Added
#### `RedisConnOpt`
- `RedisClusterClientOpt` is added to connect to Redis Cluster.
- `Username` field is added to all `RedisConnOpt` types in order to authenticate connection when Redis ACLs are used.
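A minimal sketch of connecting through the new cluster option with an ACL user (addresses and credentials are placeholders; the field names match the `RedisClusterClientOpt` struct in the `asynq.go` diff below):

```go
srv := asynq.NewServer(asynq.RedisClusterClientOpt{
	Addrs:    []string{"localhost:7000", "localhost:7001", "localhost:7002"},
	Username: "asynq-worker", // used only when Redis ACLs are enabled
	Password: "secret",
}, asynq.Config{Concurrency: 10})
```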
#### `Client`
- `ProcessIn(d time.Duration) Option` and `ProcessAt(t time.Time) Option` are added to replace `EnqueueIn` and `EnqueueAt` functionality.
#### `Inspector`
- `Queues() ([]string, error)` method is added to get all queue names.
- `ClusterKeySlot(qname string) (int64, error)` method is added to get queue's hash slot in Redis cluster.
- `ClusterNodes(qname string) ([]ClusterNode, error)` method is added to get a list of Redis cluster nodes for the given queue.
- `Close() error` method is added to close connection with redis.
### `Handler`
- `GetQueueName(ctx context.Context) (string, bool)` helper is added to extract queue name from a context.
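For example, a handler could log which queue a task came from (a minimal sketch):

```go
func HandleTask(ctx context.Context, t *asynq.Task) error {
	if qname, ok := asynq.GetQueueName(ctx); ok {
		log.Printf("processing %q from queue %q", t.Type, qname)
	}
	// ... task handling logic ...
	return nil
}
```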
## [0.11.0] - 2020-07-28
### Added
- `Inspector` type was added to monitor and mutate state of queues and tasks.
- `HealthCheckFunc` and `HealthCheckInterval` fields were added to `Config` to allow the user to specify a callback function to check the broker connection.
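A sketch of how this might be wired up, assuming the callback receives the broker connection error (nil when healthy) and `HealthCheckInterval` is a `time.Duration`:

```go
srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "localhost:6379"}, asynq.Config{
	Concurrency:         10,
	HealthCheckInterval: 15 * time.Second,
	HealthCheckFunc: func(err error) {
		if err != nil {
			log.Printf("asynq: broker connection is unhealthy: %v", err)
		}
	},
})
```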
## [0.10.0] - 2020-07-06
### Changed
- All tasks now require a timeout or deadline. By default, the timeout is set to 30 minutes.
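To control this per task, pass a `Timeout` or `Deadline` option at enqueue time (a sketch; `client` and `t` are assumed to be an existing `*asynq.Client` and `*asynq.Task`):

```go
// Override the 30-minute default with a 3-minute timeout.
if _, err := client.Enqueue(t, asynq.Timeout(3*time.Minute)); err != nil {
	log.Fatalf("could not enqueue task: %v", err)
}

// Or bound the task by an absolute deadline instead.
if _, err := client.Enqueue(t, asynq.Deadline(time.Now().Add(10*time.Minute))); err != nil {
	log.Fatalf("could not enqueue task: %v", err)
}
```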
@@ -178,6 +29,7 @@ To view details on any command, use `asynq help <command> <subcommand>`.
- Fixes the JSON number overflow issue (https://github.com/hibiken/asynq/issues/166).
## [0.9.2] - 2020-06-08
### Added

View File

@@ -45,7 +45,6 @@ Thank you! We'll try to respond as quickly as possible.
6. Create a new pull request
Please try to keep your pull request focused in scope and avoid including unrelated commits.
Please run tests locally against a Redis cluster with the `--redis_cluster` flag to ensure that the code works with Redis Cluster. TODO: Run tests using Redis cluster on CI.
After you have submitted your pull request, we'll try to get back to you as soon as possible. We may suggest some changes or improvements.

View File

@@ -1,14 +1,15 @@
# Asynq
![Build Status](https://github.com/hibiken/asynq/workflows/build/badge.svg)
[![GoDoc](https://godoc.org/github.com/hibiken/asynq?status.svg)](https://godoc.org/github.com/hibiken/asynq)
[![Go Report Card](https://goreportcard.com/badge/github.com/hibiken/asynq)](https://goreportcard.com/report/github.com/hibiken/asynq)
[![Build Status](https://travis-ci.com/hibiken/asynq.svg?token=paqzfpSkF4p23s5Ux39b&branch=master)](https://travis-ci.com/hibiken/asynq)
[![License: MIT](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT)
[![Go Report Card](https://goreportcard.com/badge/github.com/hibiken/asynq)](https://goreportcard.com/report/github.com/hibiken/asynq)
[![GoDoc](https://godoc.org/github.com/hibiken/asynq?status.svg)](https://godoc.org/github.com/hibiken/asynq)
[![Gitter chat](https://badges.gitter.im/go-asynq/gitter.svg)](https://gitter.im/go-asynq/community)
[![codecov](https://codecov.io/gh/hibiken/asynq/branch/master/graph/badge.svg)](https://codecov.io/gh/hibiken/asynq)
## Overview
Asynq is a Go library for queueing tasks and processing them asynchronously with workers. It's backed by Redis and is designed to be scalable yet easy to get started.
Asynq is a Go library for queueing tasks and processing them in the background with workers. It is backed by Redis and it is designed to have a low barrier to entry. It should be integrated in your web stack easily.
High-level overview of how Asynq works:
@@ -33,7 +34,6 @@ A system can consist of multiple worker servers and brokers, giving way to high
- Scheduling of tasks
- Durability since tasks are written to Redis
- [Retries](https://github.com/hibiken/asynq/wiki/Task-Retry) of failed tasks
- Automatic recovery of tasks in the event of a worker crash
- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#weighted-priority-queues)
- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#strict-priority-queues)
- Low latency to add a task since writes are fast in Redis
@@ -41,9 +41,7 @@ A system can consist of multiple worker servers and brokers, giving way to high
- Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)
- [Flexible handler interface with support for middlewares](https://github.com/hibiken/asynq/wiki/Handler-Deep-Dive)
- [Ability to pause queue](/tools/asynq/README.md#pause) to stop processing tasks from the queue
- [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks)
- [Support Redis Cluster](https://github.com/hibiken/asynq/wiki/Redis-Cluster) for automatic sharding and high availability
- [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for high availability
- [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for HA
- [CLI](#command-line-tool) to inspect and remote-control queues and tasks
## Quickstart
@@ -67,8 +65,8 @@ import (
// A list of task types.
const (
TypeEmailDelivery = "email:deliver"
TypeImageResize = "image:resize"
EmailDelivery = "email:deliver"
ImageProcessing = "image:process"
)
//----------------------------------------------
@@ -78,12 +76,12 @@ const (
func NewEmailDeliveryTask(userID int, tmplID string) *asynq.Task {
payload := map[string]interface{}{"user_id": userID, "template_id": tmplID}
return asynq.NewTask(TypeEmailDelivery, payload)
return asynq.NewTask(EmailDelivery, payload)
}
func NewImageResizeTask(src string) *asynq.Task {
payload := map[string]interface{}{"src": src}
return asynq.NewTask(TypeImageResize, payload)
func NewImageProcessingTask(src, dst string) *asynq.Task {
payload := map[string]interface{}{"src": src, "dst": dst}
return asynq.NewTask(ImageProcessing, payload)
}
//---------------------------------------------------------------
@@ -104,12 +102,12 @@ func HandleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error {
return err
}
fmt.Printf("Send Email to User: user_id = %d, template_id = %s\n", userID, tmplID)
// Email delivery code ...
// Email delivery logic ...
return nil
}
// ImageProcessor implements asynq.Handler interface.
type ImageProcessor struct {
type ImageProcesser struct {
// ... fields for struct
}
@@ -118,8 +116,12 @@ func (p *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
if err != nil {
return err
}
fmt.Printf("Resize image: src = %s\n", src)
// Image resizing code ...
dst, err := t.Payload.GetString("dst")
if err != nil {
return err
}
fmt.Printf("Process image: src = %s, dst = %s\n", src, dst)
// Image processing logic ...
return nil
}
@@ -128,14 +130,14 @@ func NewImageProcessor() *ImageProcessor {
}
```
In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on the queue.
In your web application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on the queue.
A task will be processed asynchronously by a background worker as soon as the task gets enqueued.
Scheduled tasks will be stored in Redis and will be enqueued at the specified time.
```go
package main
import (
"fmt"
"log"
"time"
"github.com/hibiken/asynq"
@@ -164,11 +166,11 @@ func main() {
// ------------------------------------------------------------
// Example 2: Schedule task to be processed in the future.
// Use ProcessIn or ProcessAt option.
// Use (*Client).EnqueueIn or (*Client).EnqueueAt.
// ------------------------------------------------------------
t = tasks.NewEmailDeliveryTask(42, "other:template:id")
res, err = c.Enqueue(t, asynq.ProcessIn(24*time.Hour))
res, err = c.EnqueueIn(24*time.Hour, t)
if err != nil {
log.Fatal("could not schedule task: %v", err)
}
@@ -176,13 +178,13 @@ func main() {
// ----------------------------------------------------------------------------
// Example 3: Set other options to tune task processing behavior.
// Example 3: Set options to tune task processing behavior.
// Options include MaxRetry, Queue, Timeout, Deadline, Unique etc.
// ----------------------------------------------------------------------------
c.SetDefaultOptions(tasks.TypeImageResize, asynq.MaxRetry(10), asynq.Timeout(3*time.Minute))
c.SetDefaultOptions(tasks.ImageProcessing, asynq.MaxRetry(10), asynq.Timeout(time.Minute))
t = tasks.NewImageResizeTask("some/blobstore/path")
t = tasks.NewImageProcessingTask("some/blobstore/url", "other/blobstore/url")
res, err = c.Enqueue(t)
if err != nil {
log.Fatal("could not enqueue task: %v", err)
@@ -194,7 +196,7 @@ func main() {
// Options passed at enqueue time override default ones, if any.
// ---------------------------------------------------------------------------
t = tasks.NewImageResizeTask("some/blobstore/path")
t = tasks.NewImageProcessingTask("some/blobstore/url", "other/blobstore/url")
res, err = c.Enqueue(t, asynq.Queue("critical"), asynq.Timeout(30*time.Second))
if err != nil {
log.Fatal("could not enqueue task: %v", err)
@@ -203,7 +205,7 @@ func main() {
}
```
Next, start a worker server to process these tasks in the background.
Next, create a worker server to process these tasks in the background.
To start the background workers, use [`Server`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Server) and provide your [`Handler`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Handler) to process the tasks.
You can optionally use [`ServeMux`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#ServeMux) to create a handler, just as you would with [`"net/http"`](https://golang.org/pkg/net/http/) Handler.
@@ -237,8 +239,8 @@ func main() {
// mux maps a type to a handler
mux := asynq.NewServeMux()
mux.HandleFunc(tasks.TypeEmailDelivery, tasks.HandleEmailDeliveryTask)
mux.Handle(tasks.TypeImageResize, tasks.NewImageProcessor())
mux.HandleFunc(tasks.EmailDelivery, tasks.HandleEmailDeliveryTask)
mux.Handle(tasks.ImageProcessing, tasks.NewImageProcessor())
// ...register other handlers...
if err := srv.Run(mux); err != nil {
@@ -279,7 +281,7 @@ go get -u github.com/hibiken/asynq/tools/asynq
| Dependency | Version |
| -------------------------- | ------- |
| [Redis](https://redis.io/) | v3.0+ |
| [Redis](https://redis.io/) | v2.8+ |
| [Go](https://golang.org/) | v1.13+ |
## Contributing

131
asynq.go
View File

@@ -37,14 +37,8 @@ func NewTask(typename string, payload map[string]interface{}) *Task {
//
// RedisConnOpt represents a sum of following types:
//
// - RedisClientOpt
// - RedisFailoverClientOpt
// - RedisClusterClientOpt
type RedisConnOpt interface {
// MakeRedisClient returns a new redis client instance.
// Return value is intentionally opaque to hide the implementation detail of redis client.
MakeRedisClient() interface{}
}
// RedisClientOpt | *RedisClientOpt | RedisFailoverClientOpt | *RedisFailoverClientOpt
type RedisConnOpt interface{}
// RedisClientOpt is used to create a redis client that connects
// to a redis server directly.
@@ -56,12 +50,7 @@ type RedisClientOpt struct {
// Redis server address in "host:port" format.
Addr string
// Username to authenticate the current connection when Redis ACLs are used.
// See: https://redis.io/commands/auth.
Username string
// Password to authenticate the current connection.
// See: https://redis.io/commands/auth.
// Redis server password.
Password string
// Redis DB to select after connecting to a server.
@@ -77,18 +66,6 @@ type RedisClientOpt struct {
TLSConfig *tls.Config
}
func (opt RedisClientOpt) MakeRedisClient() interface{} {
return redis.NewClient(&redis.Options{
Network: opt.Network,
Addr: opt.Addr,
Username: opt.Username,
Password: opt.Password,
DB: opt.DB,
PoolSize: opt.PoolSize,
TLSConfig: opt.TLSConfig,
})
}
// RedisFailoverClientOpt is used to create a redis client that talks
// to redis sentinels for service discovery and has an automatic failover
// capability.
@@ -104,12 +81,7 @@ type RedisFailoverClientOpt struct {
// Redis sentinel password.
SentinelPassword string
// Username to authenticate the current connection when Redis ACLs are used.
// See: https://redis.io/commands/auth.
Username string
// Password to authenticate the current connection.
// See: https://redis.io/commands/auth.
// Redis server password.
Password string
// Redis DB to select after connecting to a server.
@@ -125,53 +97,6 @@ type RedisFailoverClientOpt struct {
TLSConfig *tls.Config
}
func (opt RedisFailoverClientOpt) MakeRedisClient() interface{} {
return redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: opt.MasterName,
SentinelAddrs: opt.SentinelAddrs,
SentinelPassword: opt.SentinelPassword,
Username: opt.Username,
Password: opt.Password,
DB: opt.DB,
PoolSize: opt.PoolSize,
TLSConfig: opt.TLSConfig,
})
}
// RedisClusterClientOpt is used to create a redis client that connects to
// redis cluster.
type RedisClusterClientOpt struct {
// A seed list of host:port addresses of cluster nodes.
Addrs []string
// The maximum number of retries before giving up.
// Command is retried on network errors and MOVED/ASK redirects.
// Default is 8 retries.
MaxRedirects int
// Username to authenticate the current connection when Redis ACLs are used.
// See: https://redis.io/commands/auth.
Username string
// Password to authenticate the current connection.
// See: https://redis.io/commands/auth.
Password string
// TLS Config used to connect to a server.
// TLS will be negotiated only if this field is set.
TLSConfig *tls.Config
}
func (opt RedisClusterClientOpt) MakeRedisClient() interface{} {
return redis.NewClusterClient(&redis.ClusterOptions{
Addrs: opt.Addrs,
MaxRedirects: opt.MaxRedirects,
Username: opt.Username,
Password: opt.Password,
TLSConfig: opt.TLSConfig,
})
}
// ParseRedisURI parses redis uri string and returns RedisConnOpt if uri is valid.
// It returns a non-nil error if uri cannot be parsed.
//
@@ -244,3 +169,51 @@ func parseRedisSentinelURI(u *url.URL) (RedisConnOpt, error) {
}
return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, Password: password}, nil
}
// createRedisClient returns a redis client given a redis connection configuration.
//
// Passing an unexpected type as a RedisConnOpt argument will cause panic.
func createRedisClient(r RedisConnOpt) *redis.Client {
switch r := r.(type) {
case *RedisClientOpt:
return redis.NewClient(&redis.Options{
Network: r.Network,
Addr: r.Addr,
Password: r.Password,
DB: r.DB,
PoolSize: r.PoolSize,
TLSConfig: r.TLSConfig,
})
case RedisClientOpt:
return redis.NewClient(&redis.Options{
Network: r.Network,
Addr: r.Addr,
Password: r.Password,
DB: r.DB,
PoolSize: r.PoolSize,
TLSConfig: r.TLSConfig,
})
case *RedisFailoverClientOpt:
return redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: r.MasterName,
SentinelAddrs: r.SentinelAddrs,
SentinelPassword: r.SentinelPassword,
Password: r.Password,
DB: r.DB,
PoolSize: r.PoolSize,
TLSConfig: r.TLSConfig,
})
case RedisFailoverClientOpt:
return redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: r.MasterName,
SentinelAddrs: r.SentinelAddrs,
SentinelPassword: r.SentinelPassword,
Password: r.Password,
DB: r.DB,
PoolSize: r.PoolSize,
TLSConfig: r.TLSConfig,
})
default:
panic(fmt.Sprintf("asynq: unexpected type %T for RedisConnOpt", r))
}
}

View File

@@ -7,7 +7,6 @@ package asynq
import (
"flag"
"sort"
"strings"
"testing"
"github.com/go-redis/redis/v7"
@@ -25,9 +24,6 @@ var (
redisAddr string
redisDB int
useRedisCluster bool
redisClusterAddrs string // comma-separated list of host:port
testLogLevel = FatalLevel
)
@@ -36,52 +32,23 @@ var testLogger *log.Logger
func init() {
flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing")
flag.IntVar(&redisDB, "redis_db", 14, "redis db number to use in testing")
flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing")
flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses")
flag.Var(&testLogLevel, "loglevel", "log level to use in testing")
testLogger = log.NewLogger(nil)
testLogger.SetLevel(toInternalLogLevel(testLogLevel))
}
func setup(tb testing.TB) (r redis.UniversalClient) {
func setup(tb testing.TB) *redis.Client {
tb.Helper()
if useRedisCluster {
addrs := strings.Split(redisClusterAddrs, ",")
if len(addrs) == 0 {
tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
}
r = redis.NewClusterClient(&redis.ClusterOptions{
Addrs: addrs,
})
} else {
r = redis.NewClient(&redis.Options{
r := redis.NewClient(&redis.Options{
Addr: redisAddr,
DB: redisDB,
})
}
// Start each test with a clean slate.
h.FlushDB(tb, r)
return r
}
func getRedisConnOpt(tb testing.TB) RedisConnOpt {
tb.Helper()
if useRedisCluster {
addrs := strings.Split(redisClusterAddrs, ",")
if len(addrs) == 0 {
tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
}
return RedisClusterClientOpt{
Addrs: addrs,
}
}
return RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
}
}
var sortTaskOpt = cmp.Transformer("SortMsg", func(in []*Task) []*Task {
out := append([]*Task(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool {

View File

@@ -18,7 +18,10 @@ func BenchmarkEndToEndSimple(b *testing.B) {
for n := 0; n < b.N; n++ {
b.StopTimer() // begin setup
setup(b)
redis := getRedisConnOpt(b)
redis := &RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
}
client := NewClient(redis)
srv := NewServer(redis, Config{
Concurrency: 10,
@@ -34,7 +37,6 @@ func BenchmarkEndToEndSimple(b *testing.B) {
b.Fatalf("could not enqueue a task: %v", err)
}
}
client.Close()
var wg sync.WaitGroup
wg.Add(count)
@@ -59,7 +61,10 @@ func BenchmarkEndToEnd(b *testing.B) {
for n := 0; n < b.N; n++ {
b.StopTimer() // begin setup
setup(b)
redis := getRedisConnOpt(b)
redis := &RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
}
client := NewClient(redis)
srv := NewServer(redis, Config{
Concurrency: 10,
@@ -77,11 +82,10 @@ func BenchmarkEndToEnd(b *testing.B) {
}
for i := 0; i < count; i++ {
t := NewTask(fmt.Sprintf("scheduled%d", i), map[string]interface{}{"data": i})
if _, err := client.Enqueue(t, ProcessIn(1*time.Second)); err != nil {
if _, err := client.EnqueueAt(time.Now().Add(time.Second), t); err != nil {
b.Fatalf("could not enqueue a task: %v", err)
}
}
client.Close()
var wg sync.WaitGroup
wg.Add(count * 2)
@@ -123,7 +127,10 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
for n := 0; n < b.N; n++ {
b.StopTimer() // begin setup
setup(b)
redis := getRedisConnOpt(b)
redis := &RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
}
client := NewClient(redis)
srv := NewServer(redis, Config{
Concurrency: 10,
@@ -153,7 +160,6 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
b.Fatalf("could not enqueue a task: %v", err)
}
}
client.Close()
var wg sync.WaitGroup
wg.Add(highCount + defaultCount + lowCount)
@@ -179,7 +185,10 @@ func BenchmarkClientWhileServerRunning(b *testing.B) {
for n := 0; n < b.N; n++ {
b.StopTimer() // begin setup
setup(b)
redis := getRedisConnOpt(b)
redis := &RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
}
client := NewClient(redis)
srv := NewServer(redis, Config{
Concurrency: 10,
@@ -198,7 +207,7 @@ func BenchmarkClientWhileServerRunning(b *testing.B) {
// Schedule 10,000 tasks.
for i := 0; i < count; i++ {
t := NewTask(fmt.Sprintf("scheduled%d", i), map[string]interface{}{"data": i})
if _, err := client.Enqueue(t, ProcessIn(1*time.Second)); err != nil {
if _, err := client.EnqueueAt(time.Now().Add(time.Second), t); err != nil {
b.Fatalf("could not enqueue a task: %v", err)
}
}
@@ -224,7 +233,6 @@ func BenchmarkClientWhileServerRunning(b *testing.B) {
b.StopTimer() // begin teardown
srv.Stop()
client.Close()
b.StartTimer() // end teardown
}
}

201
client.go
View File

@@ -7,11 +7,11 @@ package asynq
import (
"errors"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v7"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
@@ -29,42 +29,17 @@ type Client struct {
rdb *rdb.RDB
}
// NewClient returns a new Client instance given a redis connection option.
// NewClient returns a new Client given a redis connection option.
func NewClient(r RedisConnOpt) *Client {
c, ok := r.MakeRedisClient().(redis.UniversalClient)
if !ok {
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
}
rdb := rdb.NewRDB(c)
rdb := rdb.NewRDB(createRedisClient(r))
return &Client{
opts: make(map[string][]Option),
rdb: rdb,
}
}
type OptionType int
const (
MaxRetryOpt OptionType = iota
QueueOpt
TimeoutOpt
DeadlineOpt
UniqueOpt
ProcessAtOpt
ProcessInOpt
)
// Option specifies the task processing behavior.
type Option interface {
// String returns a string representation of the option.
String() string
// Type describes the type of the option.
Type() OptionType
// Value returns a value used to create this option.
Value() interface{}
}
type Option interface{}
// Internal option representations.
type (
@@ -73,8 +48,6 @@ type (
timeoutOption time.Duration
deadlineOption time.Time
uniqueOption time.Duration
processAtOption time.Time
processInOption time.Duration
)
// MaxRetry returns an option to specify the max number of times
@@ -88,21 +61,13 @@ func MaxRetry(n int) Option {
return retryOption(n)
}
func (n retryOption) String() string { return fmt.Sprintf("MaxRetry(%d)", int(n)) }
func (n retryOption) Type() OptionType { return MaxRetryOpt }
func (n retryOption) Value() interface{} { return int(n) }
// Queue returns an option to specify the queue to enqueue the task into.
//
// Queue name is case-insensitive and the lowercased version is used.
func Queue(qname string) Option {
return queueOption(strings.ToLower(qname))
func Queue(name string) Option {
return queueOption(strings.ToLower(name))
}
func (qname queueOption) String() string { return fmt.Sprintf("Queue(%q)", string(qname)) }
func (qname queueOption) Type() OptionType { return QueueOpt }
func (qname queueOption) Value() interface{} { return string(qname) }
// Timeout returns an option to specify how long a task may run.
// If the timeout elapses before the Handler returns, then the task
// will be retried.
@@ -115,10 +80,6 @@ func Timeout(d time.Duration) Option {
return timeoutOption(d)
}
func (d timeoutOption) String() string { return fmt.Sprintf("Timeout(%v)", time.Duration(d)) }
func (d timeoutOption) Type() OptionType { return TimeoutOpt }
func (d timeoutOption) Value() interface{} { return time.Duration(d) }
// Deadline returns an option to specify the deadline for the given task.
// If it reaches the deadline before the Handler returns, then the task
// will be retried.
@@ -129,12 +90,6 @@ func Deadline(t time.Time) Option {
return deadlineOption(t)
}
func (t deadlineOption) String() string {
return fmt.Sprintf("Deadline(%v)", time.Time(t).Format(time.UnixDate))
}
func (t deadlineOption) Type() OptionType { return DeadlineOpt }
func (t deadlineOption) Value() interface{} { return time.Time(t) }
// Unique returns an option to enqueue a task only if the given task is unique.
// Task enqueued with this option is guaranteed to be unique within the given ttl.
// Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued.
@@ -148,35 +103,6 @@ func Unique(ttl time.Duration) Option {
return uniqueOption(ttl)
}
func (ttl uniqueOption) String() string { return fmt.Sprintf("Unique(%v)", time.Duration(ttl)) }
func (ttl uniqueOption) Type() OptionType { return UniqueOpt }
func (ttl uniqueOption) Value() interface{} { return time.Duration(ttl) }
// ProcessAt returns an option to specify when to process the given task.
//
// If there's a conflicting ProcessIn option, the last option passed to Enqueue overrides the others.
func ProcessAt(t time.Time) Option {
return processAtOption(t)
}
func (t processAtOption) String() string {
return fmt.Sprintf("ProcessAt(%v)", time.Time(t).Format(time.UnixDate))
}
func (t processAtOption) Type() OptionType { return ProcessAtOpt }
func (t processAtOption) Value() interface{} { return time.Time(t) }
// ProcessIn returns an option to specify when to process the given task relative to the current time.
//
// If there's a conflicting ProcessAt option, the last option passed to Enqueue overrides the others.
func ProcessIn(d time.Duration) Option {
return processInOption(d)
}
func (d processInOption) String() string { return fmt.Sprintf("ProcessIn(%v)", time.Duration(d)) }
func (d processInOption) Type() OptionType { return ProcessInOpt }
func (d processInOption) Value() interface{} { return time.Duration(d) }
// ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
//
// ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
@@ -188,46 +114,65 @@ type option struct {
timeout time.Duration
deadline time.Time
uniqueTTL time.Duration
processAt time.Time
}
// composeOptions merges user provided options into the default options
// and returns the composed option.
// It also validates the user provided options and returns an error if any of
// the user provided options fail the validations.
func composeOptions(opts ...Option) (option, error) {
func composeOptions(opts ...Option) option {
res := option{
retry: defaultMaxRetry,
queue: base.DefaultQueueName,
timeout: 0, // do not set to defaultTimeout here
deadline: time.Time{},
processAt: time.Now(),
}
for _, opt := range opts {
switch opt := opt.(type) {
case retryOption:
res.retry = int(opt)
case queueOption:
trimmed := strings.TrimSpace(string(opt))
if err := base.ValidateQueueName(trimmed); err != nil {
return option{}, err
}
res.queue = trimmed
res.queue = string(opt)
case timeoutOption:
res.timeout = time.Duration(opt)
case deadlineOption:
res.deadline = time.Time(opt)
case uniqueOption:
res.uniqueTTL = time.Duration(opt)
case processAtOption:
res.processAt = time.Time(opt)
case processInOption:
res.processAt = time.Now().Add(time.Duration(opt))
default:
// ignore unexpected option
}
}
return res, nil
return res
}
// uniqueKey computes the redis key used for the given task.
// It returns an empty string if ttl is zero.
func uniqueKey(t *Task, ttl time.Duration, qname string) string {
if ttl == 0 {
return ""
}
return fmt.Sprintf("%s:%s:%s", t.Type, serializePayload(t.Payload.data), qname)
}
func serializePayload(payload map[string]interface{}) string {
if payload == nil {
return "nil"
}
type entry struct {
k string
v interface{}
}
var es []entry
for k, v := range payload {
es = append(es, entry{k, v})
}
// sort entries by key
sort.Slice(es, func(i, j int) bool { return es[i].k < es[j].k })
var b strings.Builder
for _, e := range es {
if b.Len() > 0 {
b.WriteString(",")
}
b.WriteString(fmt.Sprintf("%s=%v", e.k, e.v))
}
return b.String()
}
const (
@@ -260,12 +205,6 @@ type Result struct {
// ID is a unique identifier for the task.
ID string
// EnqueuedAt is the time the task was enqueued in UTC.
EnqueuedAt time.Time
// ProcessAt indicates when the task should be processed.
ProcessAt time.Time
// Retry is the maximum number of retry for the task.
Retry int
@@ -290,29 +229,51 @@ type Result struct {
Deadline time.Time
}
// Close closes the connection with redis.
func (c *Client) Close() error {
return c.rdb.Close()
// EnqueueAt schedules task to be enqueued at the specified time.
//
// EnqueueAt returns nil if the task is scheduled successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
// By default, max retry is set to 25 and timeout is set to 30 minutes.
func (c *Client) EnqueueAt(t time.Time, task *Task, opts ...Option) (*Result, error) {
return c.enqueueAt(t, task, opts...)
}
// Enqueue enqueues the given task to be processed asynchronously.
// Enqueue enqueues task to be processed immediately.
//
// Enqueue returns nil if the task is enqueued successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
// By default, max retry is set to 25 and timeout is set to 30 minutes.
// If no ProcessAt or ProcessIn options are passed, the task will be processed immediately.
func (c *Client) Enqueue(task *Task, opts ...Option) (*Result, error) {
return c.enqueueAt(time.Now(), task, opts...)
}
// EnqueueIn schedules task to be enqueued after the specified delay.
//
// EnqueueIn returns nil if the task is scheduled successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
// By default, max retry is set to 25 and timeout is set to 30 minutes.
func (c *Client) EnqueueIn(d time.Duration, task *Task, opts ...Option) (*Result, error) {
return c.enqueueAt(time.Now().Add(d), task, opts...)
}
// Close closes the connection with redis server.
func (c *Client) Close() error {
return c.rdb.Close()
}
func (c *Client) enqueueAt(t time.Time, task *Task, opts ...Option) (*Result, error) {
c.mu.Lock()
defer c.mu.Unlock()
if defaults, ok := c.opts[task.Type]; ok {
opts = append(defaults, opts...)
}
c.mu.Unlock()
opt, err := composeOptions(opts...)
if err != nil {
return nil, err
}
opt := composeOptions(opts...)
deadline := noDeadline
if !opt.deadline.IsZero() {
deadline = opt.deadline
@@ -325,10 +286,6 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*Result, error) {
// If neither deadline nor timeout are set, use default timeout.
timeout = defaultTimeout
}
var uniqueKey string
if opt.uniqueTTL > 0 {
uniqueKey = base.UniqueKey(opt.queue, task.Type, task.Payload.data)
}
msg := &base.TaskMessage{
ID: uuid.New(),
Type: task.Type,
@@ -337,14 +294,14 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*Result, error) {
Retry: opt.retry,
Deadline: deadline.Unix(),
Timeout: int64(timeout.Seconds()),
UniqueKey: uniqueKey,
UniqueKey: uniqueKey(task, opt.uniqueTTL, opt.queue),
}
var err error
now := time.Now()
if opt.processAt.Before(now) || opt.processAt.Equal(now) {
opt.processAt = now
if t.Before(now) || t.Equal(now) {
err = c.enqueue(msg, opt.uniqueTTL)
} else {
err = c.schedule(msg, opt.processAt, opt.uniqueTTL)
err = c.schedule(msg, t, opt.uniqueTTL)
}
switch {
case err == rdb.ErrDuplicateTask:
@@ -354,8 +311,6 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*Result, error) {
}
return &Result{
ID: msg.ID.String(),
EnqueuedAt: time.Now().UTC(),
ProcessAt: opt.processAt,
Queue: msg.Queue,
Retry: msg.Retry,
Timeout: timeout,

View File

@@ -15,10 +15,12 @@ import (
"github.com/hibiken/asynq/internal/base"
)
func TestClientEnqueueWithProcessAtOption(t *testing.T) {
func TestClientEnqueueAt(t *testing.T) {
r := setup(t)
client := NewClient(getRedisConnOpt(t))
defer client.Close()
client := NewClient(RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
})
task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})
@@ -30,11 +32,11 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
tests := []struct {
desc string
task *Task
processAt time.Time // value for ProcessAt option
opts []Option // other options
processAt time.Time
opts []Option
wantRes *Result
wantPending map[string][]*base.TaskMessage
wantScheduled map[string][]base.Z
wantEnqueued map[string][]*base.TaskMessage
wantScheduled []h.ZSetEntry
}{
{
desc: "Process task immediately",
@@ -42,14 +44,12 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
processAt: now,
opts: []Option{},
wantRes: &Result{
EnqueuedAt: now.UTC(),
ProcessAt: now,
Queue: "default",
Retry: defaultMaxRetry,
Timeout: defaultTimeout,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type,
@@ -61,9 +61,7 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
},
},
},
wantScheduled: map[string][]base.Z{
"default": {},
},
wantScheduled: nil, // db is flushed in setup so zset does not exist hence nil
},
{
desc: "Schedule task to be processed in the future",
@@ -71,20 +69,15 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
processAt: oneHourLater,
opts: []Option{},
wantRes: &Result{
EnqueuedAt: now.UTC(),
ProcessAt: oneHourLater,
Queue: "default",
Retry: defaultMaxRetry,
Timeout: defaultTimeout,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
"default": {},
},
wantScheduled: map[string][]base.Z{
"default": {
wantEnqueued: nil, // db is flushed in setup so list does not exist hence nil
wantScheduled: []h.ZSetEntry{
{
Message: &base.TaskMessage{
Msg: &base.TaskMessage{
Type: task.Type,
Payload: task.Payload.data,
Retry: defaultMaxRetry,
@@ -92,8 +85,7 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(),
},
Score: oneHourLater.Unix(),
},
Score: float64(oneHourLater.Unix()),
},
},
},
@@ -102,50 +94,45 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
opts := append(tc.opts, ProcessAt(tc.processAt))
gotRes, err := client.Enqueue(tc.task, opts...)
gotRes, err := client.EnqueueAt(tc.processAt, tc.task, tc.opts...)
if err != nil {
t.Error(err)
continue
}
cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(Result{}, "ID"),
cmpopts.EquateApproxTime(500 * time.Millisecond),
}
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
t.Errorf("%s;\nEnqueue(task, ProcessAt(%v)) returned %v, want %v; (-want,+got)\n%s",
tc.desc, tc.processAt, gotRes, tc.wantRes, diff)
if diff := cmp.Diff(tc.wantRes, gotRes, cmpopts.IgnoreFields(Result{}, "ID")); diff != "" {
t.Errorf("%s;\nEnqueueAt(processAt, task) returned %v, want %v; (-want,+got)\n%s",
tc.desc, gotRes, tc.wantRes, diff)
}
for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
for qname, want := range tc.wantEnqueued {
gotEnqueued := h.GetEnqueuedMessages(t, r, qname)
if diff := cmp.Diff(want, gotEnqueued, h.IgnoreIDOpt); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
}
}
for qname, want := range tc.wantScheduled {
gotScheduled := h.GetScheduledEntries(t, r, qname)
if diff := cmp.Diff(want, gotScheduled, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.ScheduledKey(qname), diff)
}
gotScheduled := h.GetScheduledEntries(t, r)
if diff := cmp.Diff(tc.wantScheduled, gotScheduled, h.IgnoreIDOpt); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.ScheduledQueue, diff)
}
}
}
func TestClientEnqueue(t *testing.T) {
r := setup(t)
client := NewClient(getRedisConnOpt(t))
defer client.Close()
client := NewClient(RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
})
task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})
now := time.Now()
tests := []struct {
desc string
task *Task
opts []Option
wantRes *Result
wantPending map[string][]*base.TaskMessage
wantEnqueued map[string][]*base.TaskMessage
}{
{
desc: "Process task immediately with a custom retry count",
@@ -154,13 +141,12 @@ func TestClientEnqueue(t *testing.T) {
MaxRetry(3),
},
wantRes: &Result{
ProcessAt: now,
Queue: "default",
Retry: 3,
Timeout: defaultTimeout,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type,
@@ -180,13 +166,12 @@ func TestClientEnqueue(t *testing.T) {
MaxRetry(-2),
},
wantRes: &Result{
ProcessAt: now,
Queue: "default",
Retry: 0,
Timeout: defaultTimeout,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type,
@@ -207,13 +192,12 @@ func TestClientEnqueue(t *testing.T) {
MaxRetry(10),
},
wantRes: &Result{
ProcessAt: now,
Queue: "default",
Retry: 10,
Timeout: defaultTimeout,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type,
@@ -233,13 +217,12 @@ func TestClientEnqueue(t *testing.T) {
Queue("custom"),
},
wantRes: &Result{
ProcessAt: now,
Queue: "custom",
Retry: defaultMaxRetry,
Timeout: defaultTimeout,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"custom": {
{
Type: task.Type,
@@ -259,13 +242,12 @@ func TestClientEnqueue(t *testing.T) {
Queue("HIGH"),
},
wantRes: &Result{
ProcessAt: now,
Queue: "high",
Retry: defaultMaxRetry,
Timeout: defaultTimeout,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"high": {
{
Type: task.Type,
@@ -285,13 +267,12 @@ func TestClientEnqueue(t *testing.T) {
Timeout(20 * time.Second),
},
wantRes: &Result{
ProcessAt: now,
Queue: "default",
Retry: defaultMaxRetry,
Timeout: 20 * time.Second,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type,
@@ -311,13 +292,12 @@ func TestClientEnqueue(t *testing.T) {
Deadline(time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC)),
},
wantRes: &Result{
ProcessAt: now,
Queue: "default",
Retry: defaultMaxRetry,
Timeout: noTimeout,
Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC),
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type,
@@ -338,13 +318,12 @@ func TestClientEnqueue(t *testing.T) {
Deadline(time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC)),
},
wantRes: &Result{
ProcessAt: now,
Queue: "default",
Retry: defaultMaxRetry,
Timeout: 20 * time.Second,
Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC),
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type,
@@ -367,17 +346,13 @@ func TestClientEnqueue(t *testing.T) {
t.Error(err)
continue
}
cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"),
cmpopts.EquateApproxTime(500 * time.Millisecond),
}
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
if diff := cmp.Diff(tc.wantRes, gotRes, cmpopts.IgnoreFields(Result{}, "ID")); diff != "" {
t.Errorf("%s;\nEnqueue(task) returned %v, want %v; (-want,+got)\n%s",
tc.desc, gotRes, tc.wantRes, diff)
}
for qname, want := range tc.wantPending {
got := h.GetPendingMessages(t, r, qname)
for qname, want := range tc.wantEnqueued {
got := h.GetEnqueuedMessages(t, r, qname)
if diff := cmp.Diff(want, got, h.IgnoreIDOpt); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
}
@@ -385,42 +360,39 @@ func TestClientEnqueue(t *testing.T) {
}
}
func TestClientEnqueueWithProcessInOption(t *testing.T) {
func TestClientEnqueueIn(t *testing.T) {
r := setup(t)
client := NewClient(getRedisConnOpt(t))
defer client.Close()
client := NewClient(RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
})
task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})
now := time.Now()
tests := []struct {
desc string
task *Task
delay time.Duration // value for ProcessIn option
opts []Option // other options
delay time.Duration
opts []Option
wantRes *Result
wantPending map[string][]*base.TaskMessage
wantScheduled map[string][]base.Z
wantEnqueued map[string][]*base.TaskMessage
wantScheduled []h.ZSetEntry
}{
{
desc: "schedule a task to be processed in one hour",
desc: "schedule a task to be enqueued in one hour",
task: task,
delay: 1 * time.Hour,
delay: time.Hour,
opts: []Option{},
wantRes: &Result{
ProcessAt: now.Add(1 * time.Hour),
Queue: "default",
Retry: defaultMaxRetry,
Timeout: defaultTimeout,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
"default": {},
},
wantScheduled: map[string][]base.Z{
"default": {
wantEnqueued: nil, // db is flushed in setup so list does not exist hence nil
wantScheduled: []h.ZSetEntry{
{
Message: &base.TaskMessage{
Msg: &base.TaskMessage{
Type: task.Type,
Payload: task.Payload.data,
Retry: defaultMaxRetry,
@@ -428,8 +400,7 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(),
},
Score: time.Now().Add(time.Hour).Unix(),
},
Score: float64(time.Now().Add(time.Hour).Unix()),
},
},
},
@@ -439,13 +410,12 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
delay: 0,
opts: []Option{},
wantRes: &Result{
ProcessAt: now,
Queue: "default",
Retry: defaultMaxRetry,
Timeout: defaultTimeout,
Deadline: noDeadline,
},
wantPending: map[string][]*base.TaskMessage{
wantEnqueued: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type,
@@ -457,72 +427,33 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
},
},
},
wantScheduled: map[string][]base.Z{
"default": {},
},
wantScheduled: nil, // db is flushed in setup so zset does not exist hence nil
},
}
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
opts := append(tc.opts, ProcessIn(tc.delay))
gotRes, err := client.Enqueue(tc.task, opts...)
gotRes, err := client.EnqueueIn(tc.delay, tc.task, tc.opts...)
if err != nil {
t.Error(err)
continue
}
cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"),
cmpopts.EquateApproxTime(500 * time.Millisecond),
}
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
t.Errorf("%s;\nEnqueue(task, ProcessIn(%v)) returned %v, want %v; (-want,+got)\n%s",
tc.desc, tc.delay, gotRes, tc.wantRes, diff)
if diff := cmp.Diff(tc.wantRes, gotRes, cmpopts.IgnoreFields(Result{}, "ID")); diff != "" {
t.Errorf("%s;\nEnqueueIn(delay, task) returned %v, want %v; (-want,+got)\n%s",
tc.desc, gotRes, tc.wantRes, diff)
}
for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
for qname, want := range tc.wantEnqueued {
gotEnqueued := h.GetEnqueuedMessages(t, r, qname)
if diff := cmp.Diff(want, gotEnqueued, h.IgnoreIDOpt); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
}
}
for qname, want := range tc.wantScheduled {
gotScheduled := h.GetScheduledEntries(t, r, qname)
if diff := cmp.Diff(want, gotScheduled, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.ScheduledKey(qname), diff)
}
}
}
}
func TestClientEnqueueError(t *testing.T) {
r := setup(t)
client := NewClient(getRedisConnOpt(t))
defer client.Close()
task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"})
tests := []struct {
desc string
task *Task
opts []Option
}{
{
desc: "With empty queue name",
task: task,
opts: []Option{
Queue(""),
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
_, err := client.Enqueue(tc.task, tc.opts...)
if err == nil {
t.Errorf("%s; client.Enqueue(task, opts...) did not return non-nil error", tc.desc)
gotScheduled := h.GetScheduledEntries(t, r)
if diff := cmp.Diff(tc.wantScheduled, gotScheduled, h.IgnoreIDOpt); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.ScheduledQueue, diff)
}
}
}
@@ -530,8 +461,6 @@ func TestClientEnqueueError(t *testing.T) {
func TestClientDefaultOptions(t *testing.T) {
r := setup(t)
now := time.Now()
tests := []struct {
desc string
defaultOpts []Option // options set at the client level.
@@ -547,7 +476,6 @@ func TestClientDefaultOptions(t *testing.T) {
opts: []Option{},
task: NewTask("feed:import", nil),
wantRes: &Result{
ProcessAt: now,
Queue: "feed",
Retry: defaultMaxRetry,
Timeout: defaultTimeout,
@@ -569,7 +497,6 @@ func TestClientDefaultOptions(t *testing.T) {
opts: []Option{},
task: NewTask("feed:import", nil),
wantRes: &Result{
ProcessAt: now,
Queue: "feed",
Retry: 5,
Timeout: defaultTimeout,
@@ -591,7 +518,6 @@ func TestClientDefaultOptions(t *testing.T) {
opts: []Option{Queue("critical")},
task: NewTask("feed:import", nil),
wantRes: &Result{
ProcessAt: now,
Queue: "critical",
Retry: 5,
Timeout: defaultTimeout,
@@ -611,39 +537,102 @@ func TestClientDefaultOptions(t *testing.T) {
for _, tc := range tests {
h.FlushDB(t, r)
c := NewClient(getRedisConnOpt(t))
defer c.Close()
c := NewClient(RedisClientOpt{Addr: redisAddr, DB: redisDB})
c.SetDefaultOptions(tc.task.Type, tc.defaultOpts...)
gotRes, err := c.Enqueue(tc.task, tc.opts...)
if err != nil {
t.Fatal(err)
}
cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"),
cmpopts.EquateApproxTime(500 * time.Millisecond),
}
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
if diff := cmp.Diff(tc.wantRes, gotRes, cmpopts.IgnoreFields(Result{}, "ID")); diff != "" {
t.Errorf("%s;\nEnqueue(task, opts...) returned %v, want %v; (-want,+got)\n%s",
tc.desc, gotRes, tc.wantRes, diff)
}
pending := h.GetPendingMessages(t, r, tc.queue)
if len(pending) != 1 {
enqueued := h.GetEnqueuedMessages(t, r, tc.queue)
if len(enqueued) != 1 {
t.Errorf("%s;\nexpected queue %q to have one message; got %d messages in the queue.",
tc.desc, tc.queue, len(pending))
tc.desc, tc.queue, len(enqueued))
continue
}
got := pending[0]
got := enqueued[0]
if diff := cmp.Diff(tc.want, got, h.IgnoreIDOpt); diff != "" {
t.Errorf("%s;\nmismatch found in pending task message; (-want,+got)\n%s",
t.Errorf("%s;\nmismatch found in enqueued task message; (-want,+got)\n%s",
tc.desc, diff)
}
}
}
func TestClientEnqueueUnique(t *testing.T) {
func TestUniqueKey(t *testing.T) {
tests := []struct {
desc string
task *Task
ttl time.Duration
qname string
want string
}{
{
"with zero TTL",
NewTask("email:send", map[string]interface{}{"a": 123, "b": "hello", "c": true}),
0,
"default",
"",
},
{
"with primitive types",
NewTask("email:send", map[string]interface{}{"a": 123, "b": "hello", "c": true}),
10 * time.Minute,
"default",
"email:send:a=123,b=hello,c=true:default",
},
{
"with unsorted keys",
NewTask("email:send", map[string]interface{}{"b": "hello", "c": true, "a": 123}),
10 * time.Minute,
"default",
"email:send:a=123,b=hello,c=true:default",
},
{
"with composite types",
NewTask("email:send",
map[string]interface{}{
"address": map[string]string{"line": "123 Main St", "city": "Boston", "state": "MA"},
"names": []string{"bob", "mike", "rob"}}),
10 * time.Minute,
"default",
"email:send:address=map[city:Boston line:123 Main St state:MA],names=[bob mike rob]:default",
},
{
"with complex types",
NewTask("email:send",
map[string]interface{}{
"time": time.Date(2020, time.July, 28, 0, 0, 0, 0, time.UTC),
"duration": time.Hour}),
10 * time.Minute,
"default",
"email:send:duration=1h0m0s,time=2020-07-28 00:00:00 +0000 UTC:default",
},
{
"with nil payload",
NewTask("reindex", nil),
10 * time.Minute,
"default",
"reindex:nil:default",
},
}
for _, tc := range tests {
got := uniqueKey(tc.task, tc.ttl, tc.qname)
if got != tc.want {
t.Errorf("%s: uniqueKey(%v, %v, %q) = %q, want %q", tc.desc, tc.task, tc.ttl, tc.qname, got, tc.want)
}
}
}
func TestEnqueueUnique(t *testing.T) {
r := setup(t)
c := NewClient(getRedisConnOpt(t))
defer c.Close()
c := NewClient(RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
})
tests := []struct {
task *Task
@@ -664,7 +653,7 @@ func TestClientEnqueueUnique(t *testing.T) {
t.Fatal(err)
}
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type, tc.task.Payload.data)).Val()
gotTTL := r.TTL(uniqueKey(tc.task, tc.ttl, base.DefaultQueueName)).Val()
if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL = %v, want %v", gotTTL, tc.ttl)
continue
@@ -683,10 +672,12 @@ func TestClientEnqueueUnique(t *testing.T) {
}
}
func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
func TestEnqueueInUnique(t *testing.T) {
r := setup(t)
c := NewClient(getRedisConnOpt(t))
defer c.Close()
c := NewClient(RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
})
tests := []struct {
task *Task
@@ -704,12 +695,12 @@ func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
h.FlushDB(t, r) // clean up db before each test case.
// Enqueue the task first. It should succeed.
_, err := c.Enqueue(tc.task, ProcessIn(tc.d), Unique(tc.ttl))
_, err := c.EnqueueIn(tc.d, tc.task, Unique(tc.ttl))
if err != nil {
t.Fatal(err)
}
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type, tc.task.Payload.data)).Val()
gotTTL := r.TTL(uniqueKey(tc.task, tc.ttl, base.DefaultQueueName)).Val()
wantTTL := time.Duration(tc.ttl.Seconds()+tc.d.Seconds()) * time.Second
if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
@@ -717,7 +708,7 @@ func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
}
// Enqueue the task again. It should fail.
_, err = c.Enqueue(tc.task, ProcessIn(tc.d), Unique(tc.ttl))
_, err = c.EnqueueIn(tc.d, tc.task, Unique(tc.ttl))
if err == nil {
t.Errorf("Enqueueing %+v did not return an error", tc.task)
continue
@@ -729,10 +720,12 @@ func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
}
}
func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
func TestEnqueueAtUnique(t *testing.T) {
r := setup(t)
c := NewClient(getRedisConnOpt(t))
defer c.Close()
c := NewClient(RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
})
tests := []struct {
task *Task
@@ -750,12 +743,12 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
h.FlushDB(t, r) // clean up db before each test case.
// Enqueue the task first. It should succeed.
_, err := c.Enqueue(tc.task, ProcessAt(tc.at), Unique(tc.ttl))
_, err := c.EnqueueAt(tc.at, tc.task, Unique(tc.ttl))
if err != nil {
t.Fatal(err)
}
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type, tc.task.Payload.data)).Val()
gotTTL := r.TTL(uniqueKey(tc.task, tc.ttl, base.DefaultQueueName)).Val()
wantTTL := tc.at.Add(tc.ttl).Sub(time.Now())
if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
@@ -763,7 +756,7 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
}
// Enqueue the task again. It should fail.
_, err = c.Enqueue(tc.task, ProcessAt(tc.at), Unique(tc.ttl))
_, err = c.EnqueueAt(tc.at, tc.task, Unique(tc.ttl))
if err == nil {
t.Errorf("Enqueueing %+v did not return an error", tc.task)
continue
@@ -774,4 +767,3 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
}
}
}


@@ -16,7 +16,6 @@ type taskMetadata struct {
id string
maxRetry int
retryCount int
qname string
}
// ctxKey type is unexported to prevent collisions with context keys defined in
@@ -33,7 +32,6 @@ func createContext(msg *base.TaskMessage, deadline time.Time) (context.Context,
id: msg.ID.String(),
maxRetry: msg.Retry,
retryCount: msg.Retried,
qname: msg.Queue,
}
ctx := context.WithValue(context.Background(), metadataCtxKey, metadata)
return context.WithDeadline(ctx, deadline)
@@ -74,14 +72,3 @@ func GetMaxRetry(ctx context.Context) (n int, ok bool) {
}
return metadata.maxRetry, true
}
// GetQueueName extracts queue name from a context, if any.
//
// Return value qname indicates which queue the task was pulled from.
func GetQueueName(ctx context.Context) (qname string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return "", false
}
return metadata.qname, true
}


@@ -92,9 +92,8 @@ func TestGetTaskMetadataFromContext(t *testing.T) {
desc string
msg *base.TaskMessage
}{
{"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}},
{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}},
{"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}},
{"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800}},
{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 10, Retried: 5, Timeout: 1800}},
}
for _, tc := range tests {
@@ -124,14 +123,6 @@ func TestGetTaskMetadataFromContext(t *testing.T) {
if ok && maxRetry != tc.msg.Retry {
t.Errorf("%s: GetMaxRetry(ctx) returned n == %d want %d", tc.desc, maxRetry, tc.msg.Retry)
}
qname, ok := GetQueueName(ctx)
if !ok {
t.Errorf("%s: GetQueueName(ctx) returned ok == false", tc.desc)
}
if ok && qname != tc.msg.Queue {
t.Errorf("%s: GetQueueName(ctx) returned qname == %q, want %q", tc.desc, qname, tc.msg.Queue)
}
}
}
@@ -153,8 +144,5 @@ func TestGetTaskMetadataFromContextError(t *testing.T) {
if _, ok := GetMaxRetry(tc.ctx); ok {
t.Errorf("%s: GetMaxRetry(ctx) returned ok == true", tc.desc)
}
if _, ok := GetQueueName(tc.ctx); ok {
t.Errorf("%s: GetQueueName(ctx) returned ok == true", tc.desc)
}
}
}

doc.go

@@ -3,23 +3,23 @@
// that can be found in the LICENSE file.
/*
Package asynq provides a framework for a Redis based distributed task queue.
Package asynq provides a framework for asynchronous task processing.
Asynq uses Redis as a message broker. To connect to redis,
specify the connection using one of RedisConnOpt types.
Asynq uses Redis as a message broker. To connect to redis server,
specify the options using one of RedisConnOpt types.
redisConnOpt = asynq.RedisClientOpt{
redis = &asynq.RedisClientOpt{
Addr: "127.0.0.1:6379",
Password: "xxxxx",
DB: 3,
}
The Client is used to enqueue a task.
The Client is used to enqueue a task to be processed at the specified time.
Task is created with two parameters: its type and payload.
client := asynq.NewClient(redisConnOpt)
client := asynq.NewClient(redis)
// Task is created with two parameters: its type and payload.
t := asynq.NewTask(
"send_email",
map[string]interface{}{"user_id": 42})
@@ -28,17 +28,15 @@ The Client is used to enqueue a task.
res, err := client.Enqueue(t)
// Schedule the task to be processed after one minute.
res, err = client.Enqueue(t, asynq.ProcessIn(1*time.Minute))
res, err = client.EnqueueIn(time.Minute, t)
The Server is used to run the task processing workers with a given
The Server is used to run the background task processing with a given
handler.
srv := asynq.NewServer(redisConnOpt, asynq.Config{
srv := asynq.NewServer(redis, asynq.Config{
Concurrency: 10,
})
if err := srv.Run(handler); err != nil {
log.Fatal(err)
}
srv.Run(handler)
Handler is an interface type with a method which
takes a task and returns an error. Handler should return nil if

Two binary image files changed (60 KiB image removed; 329 KiB image updated to 983 KiB); contents not shown.

@@ -9,7 +9,6 @@ import (
"log"
"os"
"os/signal"
"time"
"github.com/hibiken/asynq"
"golang.org/x/sys/unix"
@@ -79,25 +78,6 @@ func ExampleServer_Quiet() {
srv.Stop()
}
func ExampleScheduler() {
scheduler := asynq.NewScheduler(
asynq.RedisClientOpt{Addr: ":6379"},
&asynq.SchedulerOpts{Location: time.Local},
)
if _, err := scheduler.Register("* * * * *", asynq.NewTask("task1", nil)); err != nil {
log.Fatal(err)
}
if _, err := scheduler.Register("@every 30s", asynq.NewTask("task2", nil)); err != nil {
log.Fatal(err)
}
// Run blocks and waits for os signal to terminate the program.
if err := scheduler.Run(); err != nil {
log.Fatal(err)
}
}
func ExampleParseRedisURI() {
rconn, err := asynq.ParseRedisURI("redis://localhost:6379/10")
if err != nil {


@@ -1,75 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// A forwarder is responsible for moving scheduled and retry tasks to pending state
// so that the tasks get processed by the workers.
type forwarder struct {
logger *log.Logger
broker base.Broker
// channel to communicate back to the long running "forwarder" goroutine.
done chan struct{}
// list of queue names to check and enqueue.
queues []string
// poll interval on average
avgInterval time.Duration
}
type forwarderParams struct {
logger *log.Logger
broker base.Broker
queues []string
interval time.Duration
}
func newForwarder(params forwarderParams) *forwarder {
return &forwarder{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
queues: params.queues,
avgInterval: params.interval,
}
}
func (f *forwarder) terminate() {
f.logger.Debug("Forwarder shutting down...")
// Signal the forwarder goroutine to stop polling.
f.done <- struct{}{}
}
// start starts the "forwarder" goroutine.
func (f *forwarder) start(wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-f.done:
f.logger.Debug("Forwarder done")
return
case <-time.After(f.avgInterval):
f.exec()
}
}
}()
}
func (f *forwarder) exec() {
if err := f.broker.CheckAndEnqueue(f.queues...); err != nil {
f.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
}
}
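// A minimal wiring sketch, assuming the caller supplies the broker and the
// package logger; the real wiring lives in the server and is not shown here.
func exampleForwarderWiring(lg *log.Logger, broker base.Broker) {
    var wg sync.WaitGroup
    f := newForwarder(forwarderParams{
        logger:   lg,
        broker:   broker,
        queues:   []string{"default", "critical"},
        interval: 5 * time.Second,
    })
    f.start(&wg)
    // ... later, on shutdown: stop polling and wait for the goroutine to exit.
    f.terminate()
    wg.Wait()
}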


@@ -1,137 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
)
func TestForwarder(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
const pollInterval = time.Second
s := newForwarder(forwarderParams{
logger: testLogger,
broker: rdbClient,
queues: []string{"default", "critical"},
interval: pollInterval,
})
t1 := h.NewTaskMessageWithQueue("gen_thumbnail", nil, "default")
t2 := h.NewTaskMessageWithQueue("send_email", nil, "critical")
t3 := h.NewTaskMessageWithQueue("reindex", nil, "default")
t4 := h.NewTaskMessageWithQueue("sync", nil, "critical")
now := time.Now()
tests := []struct {
initScheduled map[string][]base.Z // scheduled queue initial state
initRetry map[string][]base.Z // retry queue initial state
initPending map[string][]*base.TaskMessage // default queue initial state
wait time.Duration // wait duration before checking for final state
wantScheduled map[string][]*base.TaskMessage // schedule queue final state
wantRetry map[string][]*base.TaskMessage // retry queue final state
wantPending map[string][]*base.TaskMessage // default queue final state
}{
{
initScheduled: map[string][]base.Z{
"default": {{Message: t1, Score: now.Add(time.Hour).Unix()}},
"critical": {{Message: t2, Score: now.Add(-2 * time.Second).Unix()}},
},
initRetry: map[string][]base.Z{
"default": {{Message: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()}},
"critical": {},
},
initPending: map[string][]*base.TaskMessage{
"default": {},
"critical": {t4},
},
wait: pollInterval * 2,
wantScheduled: map[string][]*base.TaskMessage{
"default": {t1},
"critical": {},
},
wantRetry: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
},
wantPending: map[string][]*base.TaskMessage{
"default": {t3},
"critical": {t2, t4},
},
},
{
initScheduled: map[string][]base.Z{
"default": {
{Message: t1, Score: now.Unix()},
{Message: t3, Score: now.Add(-500 * time.Millisecond).Unix()},
},
"critical": {
{Message: t2, Score: now.Add(-2 * time.Second).Unix()},
},
},
initRetry: map[string][]base.Z{
"default": {},
"critical": {},
},
initPending: map[string][]*base.TaskMessage{
"default": {},
"critical": {t4},
},
wait: pollInterval * 2,
wantScheduled: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
},
wantRetry: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
},
wantPending: map[string][]*base.TaskMessage{
"default": {t1, t3},
"critical": {t2, t4},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
h.SeedAllScheduledQueues(t, r, tc.initScheduled) // initialize scheduled queue
h.SeedAllRetryQueues(t, r, tc.initRetry) // initialize retry queue
h.SeedAllPendingQueues(t, r, tc.initPending) // initialize default queue
var wg sync.WaitGroup
s.start(&wg)
time.Sleep(tc.wait)
s.terminate()
for qname, want := range tc.wantScheduled {
gotScheduled := h.GetScheduledMessages(t, r, qname)
if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.ScheduledKey(qname), diff)
}
}
for qname, want := range tc.wantRetry {
gotRetry := h.GetRetryMessages(t, r, qname)
if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.RetryKey(qname), diff)
}
}
for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.QueueKey(qname), diff)
}
}
}
}

go.mod

@@ -3,10 +3,9 @@ module github.com/hibiken/asynq
go 1.13
require (
github.com/go-redis/redis/v7 v7.4.0
github.com/go-redis/redis/v7 v7.2.0
github.com/google/go-cmp v0.4.0
github.com/google/uuid v1.1.1
github.com/robfig/cron/v3 v3.0.1
github.com/spf13/cast v1.3.1
go.uber.org/goleak v0.10.0
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e

go.sum

@@ -4,8 +4,6 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -27,8 +25,6 @@ github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=


@@ -1,80 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// healthchecker is responsible for pinging the broker periodically
// and calling the user-provided HealthCheckFunc with the ping result.
type healthchecker struct {
logger *log.Logger
broker base.Broker
// channel to communicate back to the long running "healthchecker" goroutine.
done chan struct{}
// interval between healthchecks.
interval time.Duration
// function to call periodically.
healthcheckFunc func(error)
}
type healthcheckerParams struct {
logger *log.Logger
broker base.Broker
interval time.Duration
healthcheckFunc func(error)
}
func newHealthChecker(params healthcheckerParams) *healthchecker {
return &healthchecker{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
interval: params.interval,
healthcheckFunc: params.healthcheckFunc,
}
}
func (hc *healthchecker) terminate() {
if hc.healthcheckFunc == nil {
return
}
hc.logger.Debug("Healthchecker shutting down...")
// Signal the healthchecker goroutine to stop.
hc.done <- struct{}{}
}
func (hc *healthchecker) start(wg *sync.WaitGroup) {
if hc.healthcheckFunc == nil {
return
}
wg.Add(1)
go func() {
defer wg.Done()
timer := time.NewTimer(hc.interval)
for {
select {
case <-hc.done:
hc.logger.Debug("Healthchecker done")
timer.Stop()
return
case <-timer.C:
err := hc.broker.Ping()
hc.healthcheckFunc(err)
timer.Reset(hc.interval)
}
}
}()
}


@@ -1,103 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"testing"
"time"
"github.com/hibiken/asynq/internal/rdb"
"github.com/hibiken/asynq/internal/testbroker"
)
func TestHealthChecker(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
var (
// mu guards called and e variables.
mu sync.Mutex
called int
e error
)
checkFn := func(err error) {
mu.Lock()
defer mu.Unlock()
called++
e = err
}
hc := newHealthChecker(healthcheckerParams{
logger: testLogger,
broker: rdbClient,
interval: 1 * time.Second,
healthcheckFunc: checkFn,
})
hc.start(&sync.WaitGroup{})
time.Sleep(2 * time.Second)
mu.Lock()
if called == 0 {
t.Errorf("Healthchecker did not call the provided HealthCheckFunc")
}
if e != nil {
t.Errorf("HealthCheckFunc was called with non-nil error: %v", e)
}
mu.Unlock()
hc.terminate()
}
func TestHealthCheckerWhenRedisDown(t *testing.T) {
// Make sure that healthchecker goroutine doesn't panic
// if it cannot connect to redis.
defer func() {
if r := recover(); r != nil {
t.Errorf("panic occurred: %v", r)
}
}()
r := rdb.NewRDB(setup(t))
defer r.Close()
testBroker := testbroker.NewTestBroker(r)
var (
// mu guards called and e variables.
mu sync.Mutex
called int
e error
)
checkFn := func(err error) {
mu.Lock()
defer mu.Unlock()
called++
e = err
}
hc := newHealthChecker(healthcheckerParams{
logger: testLogger,
broker: testBroker,
interval: 1 * time.Second,
healthcheckFunc: checkFn,
})
testBroker.Sleep()
hc.start(&sync.WaitGroup{})
time.Sleep(2 * time.Second)
mu.Lock()
if called == 0 {
t.Errorf("Healthchecker did not call the provided HealthCheckFunc")
}
if e == nil {
t.Errorf("HealthCheckFunc was called with nil; want non-nil error")
}
mu.Unlock()
hc.terminate()
}


@@ -38,13 +38,13 @@ type heartbeater struct {
// heartbeater goroutine. In other words, confine these variables
// to this goroutine only.
started time.Time
workers map[string]*workerInfo
workers map[string]workerStat
// status is shared with other goroutine but is concurrency safe.
status *base.ServerStatus
// channels to receive updates on active workers.
starting <-chan *workerInfo
starting <-chan *base.TaskMessage
finished <-chan *base.TaskMessage
}
@@ -56,7 +56,7 @@ type heartbeaterParams struct {
queues map[string]int
strictPriority bool
status *base.ServerStatus
starting <-chan *workerInfo
starting <-chan *base.TaskMessage
finished <-chan *base.TaskMessage
}
@@ -80,7 +80,7 @@ func newHeartbeater(params heartbeaterParams) *heartbeater {
strictPriority: params.strictPriority,
status: params.status,
workers: make(map[string]*workerInfo),
workers: make(map[string]workerStat),
starting: params.starting,
finished: params.finished,
}
@@ -92,14 +92,11 @@ func (h *heartbeater) terminate() {
h.done <- struct{}{}
}
// A workerInfo holds an active worker information.
type workerInfo struct {
// the task message the worker is processing.
msg *base.TaskMessage
// the time the worker has started processing the message.
// A workerStat records the message a worker is working on
// and the time the worker has started processing the message.
type workerStat struct {
started time.Time
// deadline the worker has to finish processing the task by.
deadline time.Time
msg *base.TaskMessage
}
func (h *heartbeater) start(wg *sync.WaitGroup) {
@@ -124,8 +121,8 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
h.beat()
timer.Reset(h.interval)
case w := <-h.starting:
h.workers[w.msg.ID.String()] = w
case msg := <-h.starting:
h.workers[msg.ID.String()] = workerStat{time.Now(), msg}
case msg := <-h.finished:
delete(h.workers, msg.ID.String())
@@ -148,17 +145,15 @@ func (h *heartbeater) beat() {
}
var ws []*base.WorkerInfo
for id, w := range h.workers {
for id, stat := range h.workers {
ws = append(ws, &base.WorkerInfo{
Host: h.host,
PID: h.pid,
ServerID: h.serverID,
ID: id,
Type: w.msg.Type,
Queue: w.msg.Queue,
Payload: w.msg.Payload,
Started: w.started,
Deadline: w.deadline,
Type: stat.msg.Type,
Queue: stat.msg.Queue,
Payload: stat.msg.Payload,
Started: stat.started,
})
}


@@ -19,7 +19,6 @@ import (
func TestHeartbeater(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
tests := []struct {
@@ -29,7 +28,7 @@ func TestHeartbeater(t *testing.T) {
queues map[string]int
concurrency int
}{
{2 * time.Second, "localhost", 45678, map[string]int{"default": 1}, 10},
{time.Second, "localhost", 45678, map[string]int{"default": 1}, 10},
}
timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond)
@@ -47,7 +46,7 @@ func TestHeartbeater(t *testing.T) {
queues: tc.queues,
strictPriority: false,
status: status,
starting: make(chan *workerInfo),
starting: make(chan *base.TaskMessage),
finished: make(chan *base.TaskMessage),
})
@@ -69,7 +68,7 @@ func TestHeartbeater(t *testing.T) {
}
// allow for heartbeater to write to redis
time.Sleep(tc.interval)
time.Sleep(tc.interval * 2)
ss, err := rdbClient.ListServers()
if err != nil {
@@ -129,7 +128,6 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
}
}()
r := rdb.NewRDB(setup(t))
defer r.Close()
testBroker := testbroker.NewTestBroker(r)
hb := newHeartbeater(heartbeaterParams{
logger: testLogger,
@@ -139,7 +137,7 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
queues: map[string]int{"default": 1},
strictPriority: false,
status: base.NewServerStatus(base.StatusRunning),
starting: make(chan *workerInfo),
starting: make(chan *base.TaskMessage),
finished: make(chan *base.TaskMessage),
})


@@ -1,22 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
/*
Package inspeq provides helper types and functions to inspect queues and tasks managed by Asynq.
Inspector is used to query and mutate the state of queues and tasks.
Example:
inspector := inspeq.New(asynq.RedisClientOpt{Addr: "localhost:6379"})
tasks, err := inspector.ListArchivedTasks("my-queue")
for _, t := range tasks {
if err := inspector.DeleteTaskByKey(t.Key()); err != nil {
// handle error
}
}
*/
package inspeq


@@ -1,956 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package inspeq
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/go-redis/redis/v7"
"github.com/google/uuid"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
)
// Inspector is a client interface to inspect and mutate the state of
// queues and tasks.
type Inspector struct {
rdb *rdb.RDB
}
// New returns a new instance of Inspector.
func New(r asynq.RedisConnOpt) *Inspector {
c, ok := r.MakeRedisClient().(redis.UniversalClient)
if !ok {
panic(fmt.Sprintf("inspeq: unsupported RedisConnOpt type %T", r))
}
return &Inspector{
rdb: rdb.NewRDB(c),
}
}
// Close closes the connection with redis.
func (i *Inspector) Close() error {
return i.rdb.Close()
}
// Queues returns a list of all queue names.
func (i *Inspector) Queues() ([]string, error) {
return i.rdb.AllQueues()
}
// QueueStats represents a state of queues at a certain time.
type QueueStats struct {
// Name of the queue.
Queue string
// Total number of bytes that the queue and its tasks require to be stored in redis.
MemoryUsage int64
// Size is the total number of tasks in the queue.
// The value is the sum of Pending, Active, Scheduled, Retry, and Archived.
Size int
// Number of pending tasks.
Pending int
// Number of active tasks.
Active int
// Number of scheduled tasks.
Scheduled int
// Number of retry tasks.
Retry int
// Number of archived tasks.
Archived int
// Total number of tasks being processed during the given date.
// The number includes both succeeded and failed tasks.
Processed int
// Total number of tasks failed to be processed during the given date.
Failed int
// Paused indicates whether the queue is paused.
// If true, tasks in the queue will not be processed.
Paused bool
// Time when this stats was taken.
Timestamp time.Time
}
// CurrentStats returns a current stats of the given queue.
func (i *Inspector) CurrentStats(qname string) (*QueueStats, error) {
if err := base.ValidateQueueName(qname); err != nil {
return nil, err
}
stats, err := i.rdb.CurrentStats(qname)
if err != nil {
return nil, err
}
return &QueueStats{
Queue: stats.Queue,
MemoryUsage: stats.MemoryUsage,
Size: stats.Size,
Pending: stats.Pending,
Active: stats.Active,
Scheduled: stats.Scheduled,
Retry: stats.Retry,
Archived: stats.Archived,
Processed: stats.Processed,
Failed: stats.Failed,
Paused: stats.Paused,
Timestamp: stats.Timestamp,
}, nil
}
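// A minimal usage sketch, assuming a Redis server on localhost:6379: take a
// snapshot of the "default" queue and print the headline numbers.
func exampleCurrentStats() error {
    inspector := New(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer inspector.Close()
    stats, err := inspector.CurrentStats("default")
    if err != nil {
        return err
    }
    fmt.Printf("%s: size=%d pending=%d active=%d scheduled=%d retry=%d archived=%d paused=%t\n",
        stats.Queue, stats.Size, stats.Pending, stats.Active, stats.Scheduled, stats.Retry, stats.Archived, stats.Paused)
    return nil
}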
// DailyStats holds aggregate data for a given day for a given queue.
type DailyStats struct {
// Name of the queue.
Queue string
// Total number of tasks being processed during the given date.
// The number includes both succeeded and failed tasks.
Processed int
// Total number of tasks failed to be processed during the given date.
Failed int
// Date this stats was taken.
Date time.Time
}
// History returns a list of stats from the last n days.
func (i *Inspector) History(qname string, n int) ([]*DailyStats, error) {
if err := base.ValidateQueueName(qname); err != nil {
return nil, err
}
stats, err := i.rdb.HistoricalStats(qname, n)
if err != nil {
return nil, err
}
var res []*DailyStats
for _, s := range stats {
res = append(res, &DailyStats{
Queue: s.Queue,
Processed: s.Processed,
Failed: s.Failed,
Date: s.Time,
})
}
return res, nil
}
// ErrQueueNotFound indicates that the specified queue does not exist.
type ErrQueueNotFound struct {
qname string
}
func (e *ErrQueueNotFound) Error() string {
return fmt.Sprintf("queue %q does not exist", e.qname)
}
// ErrQueueNotEmpty indicates that the specified queue is not empty.
type ErrQueueNotEmpty struct {
qname string
}
func (e *ErrQueueNotEmpty) Error() string {
return fmt.Sprintf("queue %q is not empty", e.qname)
}
// DeleteQueue removes the specified queue.
//
// If force is set to true, DeleteQueue will remove the queue regardless of
// the queue size as long as no tasks are active in the queue.
// If force is set to false, DeleteQueue will remove the queue only if
// the queue is empty.
//
// If the specified queue does not exist, DeleteQueue returns ErrQueueNotFound.
// If force is set to false and the specified queue is not empty, DeleteQueue
// returns ErrQueueNotEmpty.
func (i *Inspector) DeleteQueue(qname string, force bool) error {
err := i.rdb.RemoveQueue(qname, force)
if _, ok := err.(*rdb.ErrQueueNotFound); ok {
return &ErrQueueNotFound{qname}
}
if _, ok := err.(*rdb.ErrQueueNotEmpty); ok {
return &ErrQueueNotEmpty{qname}
}
return err
}
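// A minimal sketch of the intended error handling, assuming a Redis server on
// localhost:6379: try a safe delete first and force-delete only when the queue
// still holds tasks (a forced delete still fails while tasks are active).
func exampleDeleteQueue() error {
    inspector := New(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer inspector.Close()
    err := inspector.DeleteQueue("low", false)
    switch err.(type) {
    case nil:
        return nil // queue was empty and has been removed
    case *ErrQueueNotFound:
        return nil // nothing to delete
    case *ErrQueueNotEmpty:
        return inspector.DeleteQueue("low", true)
    default:
        return err
    }
}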
// PendingTask is a task in a queue and is ready to be processed.
type PendingTask struct {
*asynq.Task
ID string
Queue string
MaxRetry int
Retried int
LastError string
}
// ActiveTask is a task that's currently being processed.
type ActiveTask struct {
*asynq.Task
ID string
Queue string
MaxRetry int
Retried int
LastError string
}
// ScheduledTask is a task scheduled to be processed in the future.
type ScheduledTask struct {
*asynq.Task
ID string
Queue string
MaxRetry int
Retried int
LastError string
NextProcessAt time.Time
score int64
}
// RetryTask is a task scheduled to be retried in the future.
type RetryTask struct {
*asynq.Task
ID string
Queue string
NextProcessAt time.Time
MaxRetry int
Retried int
LastError string
// TODO: LastFailedAt time.Time
score int64
}
// ArchivedTask is a task archived for debugging and inspection purposes, and
// it won't be retried automatically.
// A task can be archived when the task exhausts its retry counts or manually
// archived by a user via the CLI or Inspector.
type ArchivedTask struct {
*asynq.Task
ID string
Queue string
MaxRetry int
Retried int
LastFailedAt time.Time
LastError string
score int64
}
// Format string used for task key.
// Format is <prefix>:<uuid>:<score>.
const taskKeyFormat = "%s:%v:%v"
// Prefix used for task key.
const (
keyPrefixPending = "p"
keyPrefixScheduled = "s"
keyPrefixRetry = "r"
keyPrefixArchived = "a"
allKeyPrefixes = keyPrefixPending + keyPrefixScheduled + keyPrefixRetry + keyPrefixArchived
)
// Key returns a key used to delete and archive the pending task.
func (t *PendingTask) Key() string {
// Note: Pending tasks are stored in redis LIST, therefore no score.
// Use zero for the score to use the same key format.
return fmt.Sprintf(taskKeyFormat, keyPrefixPending, t.ID, 0)
}
// Key returns a key used to delete, run, and archive the scheduled task.
func (t *ScheduledTask) Key() string {
return fmt.Sprintf(taskKeyFormat, keyPrefixScheduled, t.ID, t.score)
}
// Key returns a key used to delete, run, and archive the retry task.
func (t *RetryTask) Key() string {
return fmt.Sprintf(taskKeyFormat, keyPrefixRetry, t.ID, t.score)
}
// Key returns a key used to delete and run the archived task.
func (t *ArchivedTask) Key() string {
return fmt.Sprintf(taskKeyFormat, keyPrefixArchived, t.ID, t.score)
}
// parseTaskKey parses a key string and returns each part of the key with the proper
// type if valid; otherwise it reports an error.
func parseTaskKey(key string) (prefix string, id uuid.UUID, score int64, err error) {
parts := strings.Split(key, ":")
if len(parts) != 3 {
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
}
id, err = uuid.Parse(parts[1])
if err != nil {
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
}
score, err = strconv.ParseInt(parts[2], 10, 64)
if err != nil {
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
}
prefix = parts[0]
if len(prefix) != 1 || !strings.Contains(allKeyPrefixes, prefix) {
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
}
return prefix, id, score, nil
}
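// A short round-trip sketch of the key format above: a scheduled task with a
// fresh ID and score 1594000000 yields a key like "s:<uuid>:1594000000", which
// parseTaskKey splits back into its parts.
func exampleTaskKeyRoundTrip() {
    id := uuid.New()
    key := fmt.Sprintf(taskKeyFormat, keyPrefixScheduled, id, 1594000000)
    prefix, gotID, score, err := parseTaskKey(key)
    fmt.Println(prefix, gotID == id, score, err) // "s", true, 1594000000, <nil>
}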
// ListOption specifies behavior of list operation.
type ListOption interface{}
// Internal list option representations.
type (
pageSizeOpt int
pageNumOpt int
)
type listOption struct {
pageSize int
pageNum int
}
const (
// Page size used by default in list operation.
defaultPageSize = 30
// Page number used by default in list operation.
defaultPageNum = 1
)
func composeListOptions(opts ...ListOption) listOption {
res := listOption{
pageSize: defaultPageSize,
pageNum: defaultPageNum,
}
for _, opt := range opts {
switch opt := opt.(type) {
case pageSizeOpt:
res.pageSize = int(opt)
case pageNumOpt:
res.pageNum = int(opt)
default:
// ignore unexpected option
}
}
return res
}
// PageSize returns an option to specify the page size for list operation.
//
// Negative page size is treated as zero.
func PageSize(n int) ListOption {
if n < 0 {
n = 0
}
return pageSizeOpt(n)
}
// Page returns an option to specify the page number for list operation.
// The value 1 fetches the first page.
//
// Negative page number is treated as one.
func Page(n int) ListOption {
if n < 0 {
n = 1
}
return pageNumOpt(n)
}
// ListPendingTasks retrieves pending tasks from the specified queue.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*PendingTask, error) {
if err := base.ValidateQueueName(qname); err != nil {
return nil, err
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
msgs, err := i.rdb.ListPending(qname, pgn)
if err != nil {
return nil, err
}
var tasks []*PendingTask
for _, m := range msgs {
tasks = append(tasks, &PendingTask{
Task: asynq.NewTask(m.Type, m.Payload),
ID: m.ID.String(),
Queue: m.Queue,
MaxRetry: m.Retry,
Retried: m.Retried,
LastError: m.ErrorMsg,
})
}
return tasks, err
}
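// A minimal sketch combining the list options above, assuming a Redis server
// on localhost:6379: walk the pending tasks in "default" ten at a time.
func examplePaginatePendingTasks() error {
    inspector := New(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer inspector.Close()
    for page := 1; ; page++ {
        tasks, err := inspector.ListPendingTasks("default", PageSize(10), Page(page))
        if err != nil {
            return err
        }
        if len(tasks) == 0 {
            return nil // no more pages
        }
        for _, t := range tasks {
            fmt.Printf("%s %s retried %d/%d\n", t.ID, t.Type, t.Retried, t.MaxRetry)
        }
    }
}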
// ListActiveTasks retrieves active tasks from the specified queue.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*ActiveTask, error) {
if err := base.ValidateQueueName(qname); err != nil {
return nil, err
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
msgs, err := i.rdb.ListActive(qname, pgn)
if err != nil {
return nil, err
}
var tasks []*ActiveTask
for _, m := range msgs {
tasks = append(tasks, &ActiveTask{
Task: asynq.NewTask(m.Type, m.Payload),
ID: m.ID.String(),
Queue: m.Queue,
MaxRetry: m.Retry,
Retried: m.Retried,
LastError: m.ErrorMsg,
})
}
return tasks, err
}
// ListScheduledTasks retrieves scheduled tasks from the specified queue.
// Tasks are sorted by NextProcessAt field in ascending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*ScheduledTask, error) {
if err := base.ValidateQueueName(qname); err != nil {
return nil, err
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListScheduled(qname, pgn)
if err != nil {
return nil, err
}
var tasks []*ScheduledTask
for _, z := range zs {
processAt := time.Unix(z.Score, 0)
t := asynq.NewTask(z.Message.Type, z.Message.Payload)
tasks = append(tasks, &ScheduledTask{
Task: t,
ID: z.Message.ID.String(),
Queue: z.Message.Queue,
MaxRetry: z.Message.Retry,
Retried: z.Message.Retried,
LastError: z.Message.ErrorMsg,
NextProcessAt: processAt,
score: z.Score,
})
}
return tasks, nil
}
// ListRetryTasks retrieves retry tasks from the specified queue.
// Tasks are sorted by NextProcessAt field in ascending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*RetryTask, error) {
if err := base.ValidateQueueName(qname); err != nil {
return nil, err
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListRetry(qname, pgn)
if err != nil {
return nil, err
}
var tasks []*RetryTask
for _, z := range zs {
processAt := time.Unix(z.Score, 0)
t := asynq.NewTask(z.Message.Type, z.Message.Payload)
tasks = append(tasks, &RetryTask{
Task: t,
ID: z.Message.ID.String(),
Queue: z.Message.Queue,
NextProcessAt: processAt,
MaxRetry: z.Message.Retry,
Retried: z.Message.Retried,
// TODO: LastFailedAt: z.Message.LastFailedAt
LastError: z.Message.ErrorMsg,
score: z.Score,
})
}
return tasks, nil
}
// ListArchivedTasks retrieves archived tasks from the specified queue.
// Tasks are sorted by LastFailedAt field in descending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*ArchivedTask, error) {
if err := base.ValidateQueueName(qname); err != nil {
return nil, err
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListArchived(qname, pgn)
if err != nil {
return nil, err
}
var tasks []*ArchivedTask
for _, z := range zs {
failedAt := time.Unix(z.Score, 0)
t := asynq.NewTask(z.Message.Type, z.Message.Payload)
tasks = append(tasks, &ArchivedTask{
Task: t,
ID: z.Message.ID.String(),
Queue: z.Message.Queue,
MaxRetry: z.Message.Retry,
Retried: z.Message.Retried,
LastFailedAt: failedAt,
LastError: z.Message.ErrorMsg,
score: z.Score,
})
}
return tasks, nil
}
// DeleteAllPendingTasks deletes all pending tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllPendingTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.DeleteAllPendingTasks(qname)
return int(n), err
}
// DeleteAllScheduledTasks deletes all scheduled tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllScheduledTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.DeleteAllScheduledTasks(qname)
return int(n), err
}
// DeleteAllRetryTasks deletes all retry tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllRetryTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.DeleteAllRetryTasks(qname)
return int(n), err
}
// DeleteAllArchivedTasks deletes all archived tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.DeleteAllArchivedTasks(qname)
return int(n), err
}
// DeleteTaskByKey deletes a task with the given key from the given queue.
func (i *Inspector) DeleteTaskByKey(qname, key string) error {
if err := base.ValidateQueueName(qname); err != nil {
return err
}
prefix, id, score, err := parseTaskKey(key)
if err != nil {
return err
}
switch prefix {
case keyPrefixPending:
return i.rdb.DeletePendingTask(qname, id)
case keyPrefixScheduled:
return i.rdb.DeleteScheduledTask(qname, id, score)
case keyPrefixRetry:
return i.rdb.DeleteRetryTask(qname, id, score)
case keyPrefixArchived:
return i.rdb.DeleteArchivedTask(qname, id, score)
default:
return fmt.Errorf("invalid key")
}
}
// RunAllScheduledTasks transitions all scheduled tasks to pending state from the given queue,
// and reports the number of tasks transitioned.
func (i *Inspector) RunAllScheduledTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.RunAllScheduledTasks(qname)
return int(n), err
}
// RunAllRetryTasks transitions all retry tasks to pending state from the given queue,
// and reports the number of tasks transitioned.
func (i *Inspector) RunAllRetryTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.RunAllRetryTasks(qname)
return int(n), err
}
// RunAllArchivedTasks transitions all archived tasks to pending state from the given queue,
// and reports the number of tasks transitioned.
func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.RunAllArchivedTasks(qname)
return int(n), err
}
// RunTaskByKey transitions a task to pending state given a task key and queue name.
func (i *Inspector) RunTaskByKey(qname, key string) error {
if err := base.ValidateQueueName(qname); err != nil {
return err
}
prefix, id, score, err := parseTaskKey(key)
if err != nil {
return err
}
switch prefix {
case keyPrefixScheduled:
return i.rdb.RunScheduledTask(qname, id, score)
case keyPrefixRetry:
return i.rdb.RunRetryTask(qname, id, score)
case keyPrefixArchived:
return i.rdb.RunArchivedTask(qname, id, score)
case keyPrefixPending:
return fmt.Errorf("task is already pending for run")
default:
return fmt.Errorf("invalid key")
}
}
// ArchiveAllPendingTasks archives all pending tasks from the given queue,
// and reports the number of tasks archived.
func (i *Inspector) ArchiveAllPendingTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.ArchiveAllPendingTasks(qname)
return int(n), err
}
// ArchiveAllScheduledTasks archives all scheduled tasks from the given queue,
// and reports the number of tasks archived.
func (i *Inspector) ArchiveAllScheduledTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.ArchiveAllScheduledTasks(qname)
return int(n), err
}
// ArchiveAllRetryTasks archives all retry tasks from the given queue,
// and reports the number of tasks archived.
func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil {
return 0, err
}
n, err := i.rdb.ArchiveAllRetryTasks(qname)
return int(n), err
}
// ArchiveTaskByKey archives a task with the given key in the given queue.
func (i *Inspector) ArchiveTaskByKey(qname, key string) error {
if err := base.ValidateQueueName(qname); err != nil {
return err
}
prefix, id, score, err := parseTaskKey(key)
if err != nil {
return err
}
switch prefix {
case keyPrefixPending:
return i.rdb.ArchivePendingTask(qname, id)
case keyPrefixScheduled:
return i.rdb.ArchiveScheduledTask(qname, id, score)
case keyPrefixRetry:
return i.rdb.ArchiveRetryTask(qname, id, score)
case keyPrefixArchived:
return fmt.Errorf("task is already archived")
default:
return fmt.Errorf("invalid key")
}
}
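// A minimal sketch tying task keys to the mutating methods above, assuming a
// Redis server on localhost:6379: archive every scheduled task in "default"
// using the key produced by (*ScheduledTask).Key.
func exampleArchiveScheduledTasks() error {
    inspector := New(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer inspector.Close()
    tasks, err := inspector.ListScheduledTasks("default")
    if err != nil {
        return err
    }
    for _, t := range tasks {
        if err := inspector.ArchiveTaskByKey("default", t.Key()); err != nil {
            return fmt.Errorf("archive task %s: %v", t.ID, err)
        }
    }
    return nil
}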
// CancelActiveTask sends a signal to cancel processing of the task with
// the given id. CancelActiveTask is best-effort, which means that it does not
// guarantee that the task with the given id will be canceled. The return
// value only indicates whether the cancelation signal has been sent.
func (i *Inspector) CancelActiveTask(id string) error {
return i.rdb.PublishCancelation(id)
}
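// A minimal sketch, assuming a Redis server on localhost:6379: request
// best-effort cancelation of every task currently being processed in "default".
func exampleCancelActiveTasks() error {
    inspector := New(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer inspector.Close()
    tasks, err := inspector.ListActiveTasks("default")
    if err != nil {
        return err
    }
    for _, t := range tasks {
        if err := inspector.CancelActiveTask(t.ID); err != nil {
            return fmt.Errorf("cancel task %s: %v", t.ID, err)
        }
    }
    return nil
}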
// PauseQueue pauses task processing on the specified queue.
// If the queue is already paused, it will return a non-nil error.
func (i *Inspector) PauseQueue(qname string) error {
if err := base.ValidateQueueName(qname); err != nil {
return err
}
return i.rdb.Pause(qname)
}
// UnpauseQueue resumes task processing on the specified queue.
// If the queue is not paused, it will return a non-nil error.
func (i *Inspector) UnpauseQueue(qname string) error {
if err := base.ValidateQueueName(qname); err != nil {
return err
}
return i.rdb.Unpause(qname)
}
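// A minimal sketch, assuming a Redis server on localhost:6379 and a caller
// supplied (hypothetical) maintenance function: stop consuming from "critical"
// for the duration of the work, then resume even if the work fails.
func examplePauseWindow(runMaintenance func() error) error {
    inspector := New(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer inspector.Close()
    if err := inspector.PauseQueue("critical"); err != nil {
        return err
    }
    defer inspector.UnpauseQueue("critical")
    return runMaintenance()
}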
// Servers returns a list of running servers' information.
func (i *Inspector) Servers() ([]*ServerInfo, error) {
servers, err := i.rdb.ListServers()
if err != nil {
return nil, err
}
workers, err := i.rdb.ListWorkers()
if err != nil {
return nil, err
}
m := make(map[string]*ServerInfo) // ServerInfo keyed by serverID
for _, s := range servers {
m[s.ServerID] = &ServerInfo{
ID: s.ServerID,
Host: s.Host,
PID: s.PID,
Concurrency: s.Concurrency,
Queues: s.Queues,
StrictPriority: s.StrictPriority,
Started: s.Started,
Status: s.Status,
ActiveWorkers: make([]*WorkerInfo, 0),
}
}
for _, w := range workers {
srvInfo, ok := m[w.ServerID]
if !ok {
continue
}
wrkInfo := &WorkerInfo{
Started: w.Started,
Deadline: w.Deadline,
Task: &ActiveTask{
Task: asynq.NewTask(w.Type, w.Payload),
ID: w.ID,
Queue: w.Queue,
},
}
srvInfo.ActiveWorkers = append(srvInfo.ActiveWorkers, wrkInfo)
}
var out []*ServerInfo
for _, srvInfo := range m {
out = append(out, srvInfo)
}
return out, nil
}
// ServerInfo describes a running Server instance.
type ServerInfo struct {
// Unique Identifier for the server.
ID string
// Host machine on which the server is running.
Host string
// PID of the process in which the server is running.
PID int
// Server configuration details.
// See Config doc for field descriptions.
Concurrency int
Queues map[string]int
StrictPriority bool
// Time the server started.
Started time.Time
// Status indicates the status of the server.
// TODO: Update comment with more details.
Status string
// A List of active workers currently processing tasks.
ActiveWorkers []*WorkerInfo
}
// WorkerInfo describes a running worker processing a task.
type WorkerInfo struct {
// The task the worker is processing.
Task *ActiveTask
// Time the worker started processing the task.
Started time.Time
// Time the worker needs to finish processing the task by.
Deadline time.Time
}
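// A minimal sketch, assuming a Redis server on localhost:6379: print each
// running server together with the tasks its workers are processing right now.
func exampleServers() error {
    inspector := New(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer inspector.Close()
    servers, err := inspector.Servers()
    if err != nil {
        return err
    }
    for _, srv := range servers {
        fmt.Printf("%s on %s (pid %d) status=%s workers=%d\n",
            srv.ID, srv.Host, srv.PID, srv.Status, len(srv.ActiveWorkers))
        for _, w := range srv.ActiveWorkers {
            fmt.Printf("  task %s (%s) started=%v deadline=%v\n",
                w.Task.ID, w.Task.Type, w.Started, w.Deadline)
        }
    }
    return nil
}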
// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
func (i *Inspector) ClusterKeySlot(qname string) (int64, error) {
return i.rdb.ClusterKeySlot(qname)
}
// ClusterNode describes a node in redis cluster.
type ClusterNode struct {
// Node ID in the cluster.
ID string
// Address of the node.
Addr string
}
// ClusterNodes returns a list of nodes the given queue belongs to.
func (i *Inspector) ClusterNodes(qname string) ([]ClusterNode, error) {
nodes, err := i.rdb.ClusterNodes(qname)
if err != nil {
return nil, err
}
var res []ClusterNode
for _, node := range nodes {
res = append(res, ClusterNode{ID: node.ID, Addr: node.Addr})
}
return res, nil
}
// SchedulerEntry holds information about a periodic task registered with a scheduler.
type SchedulerEntry struct {
// Identifier of this entry.
ID string
// Spec describes the schedule of this entry.
Spec string
// Periodic Task registered for this entry.
Task *asynq.Task
// Opts is the options for the periodic task.
Opts []asynq.Option
// Next shows the next time the task will be enqueued.
Next time.Time
// Prev shows the last time the task was enqueued.
// Zero time if task was never enqueued.
Prev time.Time
}
// SchedulerEntries returns a list of all entries registered with
// currently running schedulers.
func (i *Inspector) SchedulerEntries() ([]*SchedulerEntry, error) {
var entries []*SchedulerEntry
res, err := i.rdb.ListSchedulerEntries()
if err != nil {
return nil, err
}
for _, e := range res {
task := asynq.NewTask(e.Type, e.Payload)
var opts []asynq.Option
for _, s := range e.Opts {
if o, err := parseOption(s); err == nil {
// ignore bad data
opts = append(opts, o)
}
}
entries = append(entries, &SchedulerEntry{
ID: e.ID,
Spec: e.Spec,
Task: task,
Opts: opts,
Next: e.Next,
Prev: e.Prev,
})
}
return entries, nil
}
// parseOption interprets a string s as an Option and returns the Option if parsing is successful,
// otherwise it returns a non-nil error.
func parseOption(s string) (asynq.Option, error) {
fn, arg := parseOptionFunc(s), parseOptionArg(s)
switch fn {
case "Queue":
qname, err := strconv.Unquote(arg)
if err != nil {
return nil, err
}
return asynq.Queue(qname), nil
case "MaxRetry":
n, err := strconv.Atoi(arg)
if err != nil {
return nil, err
}
return asynq.MaxRetry(n), nil
case "Timeout":
d, err := time.ParseDuration(arg)
if err != nil {
return nil, err
}
return asynq.Timeout(d), nil
case "Deadline":
t, err := time.Parse(time.UnixDate, arg)
if err != nil {
return nil, err
}
return asynq.Deadline(t), nil
case "Unique":
d, err := time.ParseDuration(arg)
if err != nil {
return nil, err
}
return asynq.Unique(d), nil
case "ProcessAt":
t, err := time.Parse(time.UnixDate, arg)
if err != nil {
return nil, err
}
return asynq.ProcessAt(t), nil
case "ProcessIn":
d, err := time.ParseDuration(arg)
if err != nil {
return nil, err
}
return asynq.ProcessIn(d), nil
default:
return nil, fmt.Errorf("cannot not parse option string %q", s)
}
}
func parseOptionFunc(s string) string {
i := strings.Index(s, "(")
return s[:i]
}
func parseOptionArg(s string) string {
i := strings.Index(s, "(")
if i >= 0 {
j := strings.Index(s, ")")
if j > i {
return s[i+1 : j]
}
}
return ""
}
// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
type SchedulerEnqueueEvent struct {
// ID of the task that was enqueued.
TaskID string
// Time the task was enqueued.
EnqueuedAt time.Time
}
// ListSchedulerEnqueueEvents retrieves a list of enqueue events from the specified scheduler entry.
//
// By default, it retrieves the first 30 events.
func (i *Inspector) ListSchedulerEnqueueEvents(entryID string, opts ...ListOption) ([]*SchedulerEnqueueEvent, error) {
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
data, err := i.rdb.ListSchedulerEnqueueEvents(entryID, pgn)
if err != nil {
return nil, err
}
var events []*SchedulerEnqueueEvent
for _, e := range data {
events = append(events, &SchedulerEnqueueEvent{TaskID: e.TaskID, EnqueuedAt: e.EnqueuedAt})
}
return events, nil
}
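// A minimal sketch, assuming a Redis server on localhost:6379: list every
// periodic task entry registered by running schedulers together with its five
// most recent enqueue events.
func exampleSchedulerEntries() error {
    inspector := New(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer inspector.Close()
    entries, err := inspector.SchedulerEntries()
    if err != nil {
        return err
    }
    for _, e := range entries {
        fmt.Printf("%s %q next=%v\n", e.ID, e.Spec, e.Next)
        events, err := inspector.ListSchedulerEnqueueEvents(e.ID, PageSize(5))
        if err != nil {
            return err
        }
        for _, ev := range events {
            fmt.Printf("  enqueued %s at %v\n", ev.TaskID, ev.EnqueuedAt)
        }
    }
    return nil
}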

File diff suppressed because it is too large.

@@ -7,7 +7,6 @@ package asynqtest
import (
"encoding/json"
"math"
"sort"
"testing"
@@ -18,12 +17,10 @@ import (
"github.com/hibiken/asynq/internal/base"
)
// EquateInt64Approx returns a Comparer option that treats int64 values
// as equal if they are within the given margin.
func EquateInt64Approx(margin int64) cmp.Option {
return cmp.Comparer(func(a, b int64) bool {
return math.Abs(float64(a-b)) <= float64(margin)
})
// ZSetEntry is an entry in redis sorted set.
type ZSetEntry struct {
Msg *base.TaskMessage
Score float64
}
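// A minimal sketch of the comparer above: two unix timestamps (as int64) that
// differ by a couple of seconds are still reported as equal within the margin.
func exampleEquateInt64Approx(t *testing.T) {
    var want, got int64 = 1594000000, 1594000002
    if diff := cmp.Diff(want, got, EquateInt64Approx(5)); diff != "" {
        t.Errorf("timestamps differ by more than the 5s margin; (-want,+got)\n%s", diff)
    }
}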
// SortMsgOpt is a cmp.Option to sort base.TaskMessage for comparing slice of task messages.
@@ -36,10 +33,10 @@ var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage
})
// SortZSetEntryOpt is a cmp.Option to sort ZSetEntry for comparing slices of zset entries.
var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []base.Z) []base.Z {
out := append([]base.Z(nil), in...) // Copy input to avoid mutating it
var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []ZSetEntry) []ZSetEntry {
out := append([]ZSetEntry(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool {
return out[i].Message.ID.String() < out[j].Message.ID.String()
return out[i].Msg.ID.String() < out[j].Msg.ID.String()
})
return out
})
@@ -65,24 +62,6 @@ var SortWorkerInfoOpt = cmp.Transformer("SortWorkerInfo", func(in []*base.Worker
return out
})
// SortSchedulerEntryOpt is a cmp.Option to sort base.SchedulerEntry for comparing slice of entries.
var SortSchedulerEntryOpt = cmp.Transformer("SortSchedulerEntry", func(in []*base.SchedulerEntry) []*base.SchedulerEntry {
out := append([]*base.SchedulerEntry(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool {
return out[i].Spec < out[j].Spec
})
return out
})
// SortSchedulerEnqueueEventOpt is a cmp.Option to sort base.SchedulerEnqueueEvent for comparing slice of events.
var SortSchedulerEnqueueEventOpt = cmp.Transformer("SortSchedulerEnqueueEvent", func(in []*base.SchedulerEnqueueEvent) []*base.SchedulerEnqueueEvent {
out := append([]*base.SchedulerEnqueueEvent(nil), in...)
sort.Slice(out, func(i, j int) bool {
return out[i].EnqueuedAt.Unix() < out[j].EnqueuedAt.Unix()
})
return out
})
// SortStringSliceOpt is a cmp.Option to sort string slice.
var SortStringSliceOpt = cmp.Transformer("SortStringSlice", func(in []string) []string {
out := append([]string(nil), in...)
@@ -95,7 +74,15 @@ var IgnoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID")
// NewTaskMessage returns a new instance of TaskMessage given a task type and payload.
func NewTaskMessage(taskType string, payload map[string]interface{}) *base.TaskMessage {
return NewTaskMessageWithQueue(taskType, payload, base.DefaultQueueName)
return &base.TaskMessage{
ID: uuid.New(),
Type: taskType,
Queue: base.DefaultQueueName,
Retry: 25,
Payload: payload,
Timeout: 1800, // default timeout of 30 mins
Deadline: 0, // no deadline
}
}
// NewTaskMessageWithQueue returns a new instance of TaskMessage given a
@@ -107,8 +94,6 @@ func NewTaskMessageWithQueue(taskType string, payload map[string]interface{}, qn
Queue: qname,
Retry: 25,
Payload: payload,
Timeout: 1800, // default timeout of 30 mins
Deadline: 0, // no deadline
}
}
@@ -172,113 +157,57 @@ func MustUnmarshalSlice(tb testing.TB, data []string) []*base.TaskMessage {
}
// FlushDB deletes all the keys of the currently selected DB.
func FlushDB(tb testing.TB, r redis.UniversalClient) {
func FlushDB(tb testing.TB, r *redis.Client) {
tb.Helper()
switch r := r.(type) {
case *redis.Client:
if err := r.FlushDB().Err(); err != nil {
tb.Fatal(err)
}
case *redis.ClusterClient:
err := r.ForEachMaster(func(c *redis.Client) error {
if err := c.FlushAll().Err(); err != nil {
return err
}
return nil
})
if err != nil {
tb.Fatal(err)
}
}
}
// SeedPendingQueue initializes the specified queue with the given messages.
func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
// SeedEnqueuedQueue initializes the specified queue with the given messages.
//
// If queue name option is not passed, it defaults to the default queue.
func SeedEnqueuedQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage, queueOpt ...string) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
seedRedisList(tb, r, base.QueueKey(qname), msgs)
queue := base.DefaultQueue
if len(queueOpt) > 0 {
queue = base.QueueKey(queueOpt[0])
}
r.SAdd(base.AllQueues, queue)
seedRedisList(tb, r, queue, msgs)
}
// SeedActiveQueue initializes the active queue with the given messages.
func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
// SeedInProgressQueue initializes the in-progress queue with the given messages.
func SeedInProgressQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
seedRedisList(tb, r, base.ActiveKey(qname), msgs)
seedRedisList(tb, r, base.InProgressQueue, msgs)
}
// SeedScheduledQueue initializes the scheduled queue with the given messages.
func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
func SeedScheduledQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
seedRedisZSet(tb, r, base.ScheduledKey(qname), entries)
seedRedisZSet(tb, r, base.ScheduledQueue, entries)
}
// SeedRetryQueue initializes the retry queue with the given messages.
func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
func SeedRetryQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
seedRedisZSet(tb, r, base.RetryKey(qname), entries)
seedRedisZSet(tb, r, base.RetryQueue, entries)
}
// SeedArchivedQueue initializes the archived queue with the given messages.
func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
// SeedDeadQueue initializes the dead queue with the given messages.
func SeedDeadQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
seedRedisZSet(tb, r, base.ArchivedKey(qname), entries)
seedRedisZSet(tb, r, base.DeadQueue, entries)
}
// SeedDeadlines initializes the deadlines set with the given entries.
func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
func SeedDeadlines(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
tb.Helper()
r.SAdd(base.AllQueues, qname)
seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries)
seedRedisZSet(tb, r, base.KeyDeadlines, entries)
}
// SeedAllPendingQueues initializes all of the specified queues with the given messages.
//
// pending maps a queue name to a list of messages.
func SeedAllPendingQueues(tb testing.TB, r redis.UniversalClient, pending map[string][]*base.TaskMessage) {
for q, msgs := range pending {
SeedPendingQueue(tb, r, msgs, q)
}
}
// SeedAllActiveQueues initializes all of the specified active queues with the given messages.
func SeedAllActiveQueues(tb testing.TB, r redis.UniversalClient, active map[string][]*base.TaskMessage) {
for q, msgs := range active {
SeedActiveQueue(tb, r, msgs, q)
}
}
// SeedAllScheduledQueues initializes all of the specified scheduled queues with the given entries.
func SeedAllScheduledQueues(tb testing.TB, r redis.UniversalClient, scheduled map[string][]base.Z) {
for q, entries := range scheduled {
SeedScheduledQueue(tb, r, entries, q)
}
}
// SeedAllRetryQueues initializes all of the specified retry queues with the given entries.
func SeedAllRetryQueues(tb testing.TB, r redis.UniversalClient, retry map[string][]base.Z) {
for q, entries := range retry {
SeedRetryQueue(tb, r, entries, q)
}
}
// SeedAllArchivedQueues initializes all of the specified archived queues with the given entries.
func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[string][]base.Z) {
for q, entries := range archived {
SeedArchivedQueue(tb, r, entries, q)
}
}
// SeedAllDeadlines initializes all of the deadlines with the given entries.
func SeedAllDeadlines(tb testing.TB, r redis.UniversalClient, deadlines map[string][]base.Z) {
for q, entries := range deadlines {
SeedDeadlines(tb, r, entries, q)
}
}
func seedRedisList(tb testing.TB, c redis.UniversalClient, key string, msgs []*base.TaskMessage) {
func seedRedisList(tb testing.TB, c *redis.Client, key string, msgs []*base.TaskMessage) {
data := MustMarshalSlice(tb, msgs)
for _, s := range data {
if err := c.LPush(key, s).Err(); err != nil {
@@ -287,86 +216,92 @@ func seedRedisList(tb testing.TB, c redis.UniversalClient, key string, msgs []*b
}
}
func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string, items []base.Z) {
func seedRedisZSet(tb testing.TB, c *redis.Client, key string, items []ZSetEntry) {
for _, item := range items {
z := &redis.Z{Member: MustMarshal(tb, item.Message), Score: float64(item.Score)}
z := &redis.Z{Member: MustMarshal(tb, item.Msg), Score: float64(item.Score)}
if err := c.ZAdd(key, z).Err(); err != nil {
tb.Fatal(err)
}
}
}
// GetPendingMessages returns all pending messages in the given queue.
func GetPendingMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
// GetEnqueuedMessages returns all task messages in the specified queue.
//
// If queue name option is not passed, it defaults to the default queue.
func GetEnqueuedMessages(tb testing.TB, r *redis.Client, queueOpt ...string) []*base.TaskMessage {
tb.Helper()
return getListMessages(tb, r, base.QueueKey(qname))
queue := base.DefaultQueue
if len(queueOpt) > 0 {
queue = base.QueueKey(queueOpt[0])
}
return getListMessages(tb, r, queue)
}
// GetActiveMessages returns all active messages in the given queue.
func GetActiveMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
// GetInProgressMessages returns all task messages in the in-progress queue.
func GetInProgressMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
tb.Helper()
return getListMessages(tb, r, base.ActiveKey(qname))
return getListMessages(tb, r, base.InProgressQueue)
}
// GetScheduledMessages returns all scheduled task messages in the given queue.
func GetScheduledMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
// GetScheduledMessages returns all task messages in the scheduled queue.
func GetScheduledMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
tb.Helper()
return getZSetMessages(tb, r, base.ScheduledKey(qname))
return getZSetMessages(tb, r, base.ScheduledQueue)
}
// GetRetryMessages returns all retry messages in the given queue.
func GetRetryMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
// GetRetryMessages returns all task messages in the retry queue.
func GetRetryMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
tb.Helper()
return getZSetMessages(tb, r, base.RetryKey(qname))
return getZSetMessages(tb, r, base.RetryQueue)
}
// GetArchivedMessages returns all archived messages in the given queue.
func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
// GetDeadMessages returns all task messages in the dead queue.
func GetDeadMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
tb.Helper()
return getZSetMessages(tb, r, base.ArchivedKey(qname))
return getZSetMessages(tb, r, base.DeadQueue)
}
// GetScheduledEntries returns all scheduled messages and their scores in the given queue.
func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
// GetScheduledEntries returns all task messages and their scores in the scheduled queue.
func GetScheduledEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
tb.Helper()
return getZSetEntries(tb, r, base.ScheduledKey(qname))
return getZSetEntries(tb, r, base.ScheduledQueue)
}
// GetRetryEntries returns all retry messages and their scores in the given queue.
func GetRetryEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
// GetRetryEntries returns all task messages and their scores in the retry queue.
func GetRetryEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
tb.Helper()
return getZSetEntries(tb, r, base.RetryKey(qname))
return getZSetEntries(tb, r, base.RetryQueue)
}
// GetArchivedEntries returns all archived messages and their scores in the given queue.
func GetArchivedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
// GetDeadEntries returns all task messages and their scores in the dead queue.
func GetDeadEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
tb.Helper()
return getZSetEntries(tb, r, base.ArchivedKey(qname))
return getZSetEntries(tb, r, base.DeadQueue)
}
// GetDeadlinesEntries returns all task messages and their scores in the deadlines set for the given queue.
func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
// GetDeadlinesEntries returns all task messages and their scores in the deadlines set.
func GetDeadlinesEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
tb.Helper()
return getZSetEntries(tb, r, base.DeadlinesKey(qname))
return getZSetEntries(tb, r, base.KeyDeadlines)
}
func getListMessages(tb testing.TB, r redis.UniversalClient, list string) []*base.TaskMessage {
func getListMessages(tb testing.TB, r *redis.Client, list string) []*base.TaskMessage {
data := r.LRange(list, 0, -1).Val()
return MustUnmarshalSlice(tb, data)
}
func getZSetMessages(tb testing.TB, r redis.UniversalClient, zset string) []*base.TaskMessage {
func getZSetMessages(tb testing.TB, r *redis.Client, zset string) []*base.TaskMessage {
data := r.ZRange(zset, 0, -1).Val()
return MustUnmarshalSlice(tb, data)
}
func getZSetEntries(tb testing.TB, r redis.UniversalClient, zset string) []base.Z {
func getZSetEntries(tb testing.TB, r *redis.Client, zset string) []ZSetEntry {
data := r.ZRangeWithScores(zset, 0, -1).Val()
var entries []base.Z
var entries []ZSetEntry
for _, z := range data {
entries = append(entries, base.Z{
Message: MustUnmarshal(tb, z.Member.(string)),
Score: int64(z.Score),
entries = append(entries, ZSetEntry{
Msg: MustUnmarshal(tb, z.Member.(string)),
Score: z.Score,
})
}
return entries

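As an aside from the diff itself, here is a minimal test sketch showing how the v0.10.0-style helpers above combine with RDB.Dequeue. It only uses signatures that appear in this diff and assumes a local Redis on localhost:6379; the test name, package clause, and import paths are illustrative, since the internal packages are only importable from within this module.

package rdb_test

import (
	"testing"

	"github.com/go-redis/redis/v7"
	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

func TestDequeueFromDefaultQueue(t *testing.T) {
	r := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 8})
	h.FlushDB(t, r)

	msg := h.NewTaskMessage("send_email", map[string]interface{}{"user_id": 42})
	// No queue name option is passed, so the message is seeded into the default queue.
	h.SeedEnqueuedQueue(t, r, []*base.TaskMessage{msg})

	client := rdb.NewRDB(r)
	got, deadline, err := client.Dequeue(base.DefaultQueueName)
	if err != nil {
		t.Fatal(err)
	}
	if got.ID != msg.ID {
		t.Errorf("Dequeue returned task id=%s, want %s", got.ID, msg.ID)
	}
	if deadline.IsZero() {
		t.Errorf("Dequeue returned a zero deadline; want one derived from the task's Timeout")
	}
}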

@@ -9,7 +9,6 @@ import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"
"sync"
"time"
@@ -19,124 +18,54 @@ import (
)
// Version of asynq library and CLI.
const Version = "0.15.0"
const Version = "0.10.0"
// DefaultQueueName is the queue name used if none are specified by user.
const DefaultQueueName = "default"
// DefaultQueue is the redis key for the default queue.
var DefaultQueue = QueueKey(DefaultQueueName)
// Global Redis keys.
// Redis keys
const (
AllServers = "asynq:servers" // ZSET
serversPrefix = "asynq:servers:" // STRING - asynq:servers:<host>:<pid>:<serverid>
AllWorkers = "asynq:workers" // ZSET
AllSchedulers = "asynq:schedulers" // ZSET
workersPrefix = "asynq:workers:" // HASH - asynq:workers:<host>:<pid>:<serverid>
processedPrefix = "asynq:processed:" // STRING - asynq:processed:<yyyy-mm-dd>
failurePrefix = "asynq:failure:" // STRING - asynq:failure:<yyyy-mm-dd>
QueuePrefix = "asynq:queues:" // LIST - asynq:queues:<qname>
AllQueues = "asynq:queues" // SET
DefaultQueue = QueuePrefix + DefaultQueueName // LIST
ScheduledQueue = "asynq:scheduled" // ZSET
RetryQueue = "asynq:retry" // ZSET
DeadQueue = "asynq:dead" // ZSET
InProgressQueue = "asynq:in_progress" // LIST
KeyDeadlines = "asynq:deadlines" // ZSET
PausedQueues = "asynq:paused" // SET
CancelChannel = "asynq:cancel" // PubSub channel
)
// ValidateQueueName validates a given qname to be used as a queue name.
// Returns nil if valid, otherwise returns non-nil error.
func ValidateQueueName(qname string) error {
if len(qname) == 0 {
return fmt.Errorf("queue name must contain one or more characters")
}
return nil
}
// QueueKey returns a redis key for the given queue name.
func QueueKey(qname string) string {
return fmt.Sprintf("asynq:{%s}", qname)
return QueuePrefix + strings.ToLower(qname)
}
// ActiveKey returns a redis key for the active tasks.
func ActiveKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:active", qname)
// ProcessedKey returns a redis key for processed count for the given day.
func ProcessedKey(t time.Time) string {
return processedPrefix + t.UTC().Format("2006-01-02")
}
// ScheduledKey returns a redis key for the scheduled tasks.
func ScheduledKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:scheduled", qname)
}
// RetryKey returns a redis key for the retry tasks.
func RetryKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:retry", qname)
}
// ArchivedKey returns a redis key for the archived tasks.
func ArchivedKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:archived", qname)
}
// DeadlinesKey returns a redis key for the deadlines.
func DeadlinesKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:deadlines", qname)
}
// PausedKey returns a redis key to indicate that the given queue is paused.
func PausedKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:paused", qname)
}
// ProcessedKey returns a redis key for processed count for the given day for the queue.
func ProcessedKey(qname string, t time.Time) string {
return fmt.Sprintf("asynq:{%s}:processed:%s", qname, t.UTC().Format("2006-01-02"))
}
// FailedKey returns a redis key for failure count for the given day for the queue.
func FailedKey(qname string, t time.Time) string {
return fmt.Sprintf("asynq:{%s}:failed:%s", qname, t.UTC().Format("2006-01-02"))
// FailureKey returns a redis key for failure count for the given day.
func FailureKey(t time.Time) string {
return failurePrefix + t.UTC().Format("2006-01-02")
}
// ServerInfoKey returns a redis key for process info.
func ServerInfoKey(hostname string, pid int, serverID string) string {
return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, serverID)
func ServerInfoKey(hostname string, pid int, sid string) string {
return fmt.Sprintf("%s%s:%d:%s", serversPrefix, hostname, pid, sid)
}
// WorkersKey returns a redis key for the workers given hostname, pid, and server ID.
func WorkersKey(hostname string, pid int, serverID string) string {
return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, serverID)
}
// SchedulerEntriesKey returns a redis key for the scheduler entries given scheduler ID.
func SchedulerEntriesKey(schedulerID string) string {
return fmt.Sprintf("asynq:schedulers:{%s}", schedulerID)
}
// SchedulerHistoryKey returns a redis key for the scheduler's history for the given entry.
func SchedulerHistoryKey(entryID string) string {
return fmt.Sprintf("asynq:scheduler_history:%s", entryID)
}
// UniqueKey returns a redis key with the given type, payload, and queue name.
func UniqueKey(qname, tasktype string, payload map[string]interface{}) string {
return fmt.Sprintf("asynq:{%s}:unique:%s:%s", qname, tasktype, serializePayload(payload))
}
func serializePayload(payload map[string]interface{}) string {
if payload == nil {
return "nil"
}
type entry struct {
k string
v interface{}
}
var es []entry
for k, v := range payload {
es = append(es, entry{k, v})
}
// sort entries by key
sort.Slice(es, func(i, j int) bool { return es[i].k < es[j].k })
var b strings.Builder
for _, e := range es {
if b.Len() > 0 {
b.WriteString(",")
}
b.WriteString(fmt.Sprintf("%s=%v", e.k, e.v))
}
return b.String()
func WorkersKey(hostname string, pid int, sid string) string {
return fmt.Sprintf("%s%s:%d:%s", workersPrefix, hostname, pid, sid)
}
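For quick orientation, here is a small in-package snippet (an illustration, not part of the diff) showing what the v0.10.0 key helpers above evaluate to; the expected strings match the test cases further down. The fmt and time imports and the function name are assumed.

func printKeySamples() {
	t := time.Date(2019, 11, 14, 10, 30, 0, 0, time.UTC)
	fmt.Println(QueueKey("critical"))                          // asynq:queues:critical
	fmt.Println(ProcessedKey(t))                               // asynq:processed:2019-11-14
	fmt.Println(FailureKey(t))                                 // asynq:failure:2019-11-14
	fmt.Println(ServerInfoKey("localhost", 9876, "server123")) // asynq:servers:localhost:9876:server123
	fmt.Println(WorkersKey("localhost", 9876, "server123"))    // asynq:workers:localhost:9876:server123
}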
// TaskMessage is the internal representation of a task with additional metadata fields.
@@ -165,7 +94,7 @@ type TaskMessage struct {
// Timeout specifies timeout in seconds.
// If task processing doesn't complete within the timeout, the task will be retried
// if retry count is remaining. Otherwise it will be moved to the archive.
// if retry count is remaining. Otherwise it will be moved to the dead queue.
//
// Use zero to indicate no timeout.
Timeout int64
@@ -173,7 +102,7 @@ type TaskMessage struct {
// Deadline specifies the deadline for the task in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// If task processing doesn't complete before the deadline, the task will be retried
// if retry count is remaining. Otherwise it will be moved to the archive.
// if retry count is remaining. Otherwise it will be moved to the dead queue.
//
// Use zero to indicate no deadline.
Deadline int64
@@ -204,12 +133,6 @@ func DecodeMessage(s string) (*TaskMessage, error) {
return &msg, nil
}
// Z represents sorted set member.
type Z struct {
Message *TaskMessage
Score int64
}
// ServerStatus represents status of a server.
// ServerStatus methods are concurrency safe.
type ServerStatus struct {
@@ -228,10 +151,10 @@ const (
// StatusIdle indicates the server is in idle state.
StatusIdle ServerStatusValue = iota
// StatusRunning indicates the server is up and active.
// StatusRunning indicates the server is up and processing tasks.
StatusRunning
// StatusQuiet indicates the server is up but not active.
// StatusQuiet indicates the server is up but not processing new tasks.
StatusQuiet
// StatusStopped indicates the server has been stopped.
@@ -286,50 +209,14 @@ type ServerInfo struct {
type WorkerInfo struct {
Host string
PID int
ServerID string
ID string
Type string
Queue string
Payload map[string]interface{}
Started time.Time
Deadline time.Time
}
// SchedulerEntry holds information about a periodic task registered with a scheduler.
type SchedulerEntry struct {
// Identifier of this entry.
ID string
// Spec describes the schedule of this entry.
Spec string
// Type is the task type of the periodic task.
Type string
// Payload is the payload of the periodic task.
Payload map[string]interface{}
// Opts is the options for the periodic task.
Opts []string
// Next shows the next time the task will be enqueued.
Next time.Time
// Prev shows the last time the task was enqueued.
// Zero time if task was never enqueued.
Prev time.Time
}
// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
type SchedulerEnqueueEvent struct {
// ID of the task that was enqueued.
TaskID string
// Time the task was enqueued.
EnqueuedAt time.Time
}
// Cancelations is a collection that holds cancel functions for all active tasks.
// Cancelations is a collection that holds cancel functions for all in-progress tasks.
//
// Cancelations are safe for concurrent use by multiple goroutines.
type Cancelations struct {
@@ -370,7 +257,6 @@ func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
//
// See rdb.RDB as a reference implementation.
type Broker interface {
Ping() error
Enqueue(msg *TaskMessage) error
EnqueueUnique(msg *TaskMessage, ttl time.Duration) error
Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
@@ -379,9 +265,9 @@ type Broker interface {
Schedule(msg *TaskMessage, processAt time.Time) error
ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
Retry(msg *TaskMessage, processAt time.Time, errMsg string) error
Archive(msg *TaskMessage, errMsg string) error
CheckAndEnqueue(qnames ...string) error
ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
Kill(msg *TaskMessage, errMsg string) error
CheckAndEnqueue() error
ListDeadlineExceeded(deadline time.Time) ([]*TaskMessage, error)
WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
ClearServerState(host string, pid int, serverID string) error
CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers


@@ -20,8 +20,7 @@ func TestQueueKey(t *testing.T) {
qname string
want string
}{
{"default", "asynq:{default}"},
{"custom", "asynq:{custom}"},
{"custom", "asynq:queues:custom"},
}
for _, tc := range tests {
@@ -32,140 +31,36 @@ func TestQueueKey(t *testing.T) {
}
}
func TestActiveKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:active"},
{"custom", "asynq:{custom}:active"},
}
for _, tc := range tests {
got := ActiveKey(tc.qname)
if got != tc.want {
t.Errorf("ActiveKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestDeadlinesKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:deadlines"},
{"custom", "asynq:{custom}:deadlines"},
}
for _, tc := range tests {
got := DeadlinesKey(tc.qname)
if got != tc.want {
t.Errorf("DeadlinesKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestScheduledKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:scheduled"},
{"custom", "asynq:{custom}:scheduled"},
}
for _, tc := range tests {
got := ScheduledKey(tc.qname)
if got != tc.want {
t.Errorf("ScheduledKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestRetryKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:retry"},
{"custom", "asynq:{custom}:retry"},
}
for _, tc := range tests {
got := RetryKey(tc.qname)
if got != tc.want {
t.Errorf("RetryKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestArchivedKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:archived"},
{"custom", "asynq:{custom}:archived"},
}
for _, tc := range tests {
got := ArchivedKey(tc.qname)
if got != tc.want {
t.Errorf("ArchivedKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestPausedKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:paused"},
{"custom", "asynq:{custom}:paused"},
}
for _, tc := range tests {
got := PausedKey(tc.qname)
if got != tc.want {
t.Errorf("PausedKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestProcessedKey(t *testing.T) {
tests := []struct {
qname string
input time.Time
want string
}{
{"default", time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:{default}:processed:2019-11-14"},
{"critical", time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:{critical}:processed:2020-12-01"},
{"default", time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:{default}:processed:2020-01-06"},
{time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:processed:2019-11-14"},
{time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:processed:2020-12-01"},
{time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:processed:2020-01-06"},
}
for _, tc := range tests {
got := ProcessedKey(tc.qname, tc.input)
got := ProcessedKey(tc.input)
if got != tc.want {
t.Errorf("ProcessedKey(%v) = %q, want %q", tc.input, got, tc.want)
}
}
}
func TestFailedKey(t *testing.T) {
func TestFailureKey(t *testing.T) {
tests := []struct {
qname string
input time.Time
want string
}{
{"default", time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:{default}:failed:2019-11-14"},
{"custom", time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:{custom}:failed:2020-12-01"},
{"low", time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:{low}:failed:2020-01-06"},
{time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:failure:2019-11-14"},
{time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:failure:2020-12-01"},
{time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:failure:2020-01-06"},
}
for _, tc := range tests {
got := FailedKey(tc.qname, tc.input)
got := FailureKey(tc.input)
if got != tc.want {
t.Errorf("FailureKey(%v) = %q, want %q", tc.input, got, tc.want)
}
@@ -179,8 +74,8 @@ func TestServerInfoKey(t *testing.T) {
sid string
want string
}{
{"localhost", 9876, "server123", "asynq:servers:{localhost:9876:server123}"},
{"127.0.0.1", 1234, "server987", "asynq:servers:{127.0.0.1:1234:server987}"},
{"localhost", 9876, "server123", "asynq:servers:localhost:9876:server123"},
{"127.0.0.1", 1234, "server987", "asynq:servers:127.0.0.1:1234:server987"},
}
for _, tc := range tests {
@@ -199,8 +94,8 @@ func TestWorkersKey(t *testing.T) {
sid string
want string
}{
{"localhost", 9876, "server1", "asynq:workers:{localhost:9876:server1}"},
{"127.0.0.1", 1234, "server2", "asynq:workers:{127.0.0.1:1234:server2}"},
{"localhost", 9876, "server1", "asynq:workers:localhost:9876:server1"},
{"127.0.0.1", 1234, "server2", "asynq:workers:127.0.0.1:1234:server2"},
}
for _, tc := range tests {
@@ -212,98 +107,6 @@ func TestWorkersKey(t *testing.T) {
}
}
func TestSchedulerEntriesKey(t *testing.T) {
tests := []struct {
schedulerID string
want string
}{
{"localhost:9876:scheduler123", "asynq:schedulers:{localhost:9876:scheduler123}"},
{"127.0.0.1:1234:scheduler987", "asynq:schedulers:{127.0.0.1:1234:scheduler987}"},
}
for _, tc := range tests {
got := SchedulerEntriesKey(tc.schedulerID)
if got != tc.want {
t.Errorf("SchedulerEntriesKey(%q) = %q, want %q", tc.schedulerID, got, tc.want)
}
}
}
func TestSchedulerHistoryKey(t *testing.T) {
tests := []struct {
entryID string
want string
}{
{"entry876", "asynq:scheduler_history:entry876"},
{"entry345", "asynq:scheduler_history:entry345"},
}
for _, tc := range tests {
got := SchedulerHistoryKey(tc.entryID)
if got != tc.want {
t.Errorf("SchedulerHistoryKey(%q) = %q, want %q",
tc.entryID, got, tc.want)
}
}
}
func TestUniqueKey(t *testing.T) {
tests := []struct {
desc string
qname string
tasktype string
payload map[string]interface{}
want string
}{
{
"with primitive types",
"default",
"email:send",
map[string]interface{}{"a": 123, "b": "hello", "c": true},
"asynq:{default}:unique:email:send:a=123,b=hello,c=true",
},
{
"with unsorted keys",
"default",
"email:send",
map[string]interface{}{"b": "hello", "c": true, "a": 123},
"asynq:{default}:unique:email:send:a=123,b=hello,c=true",
},
{
"with composite types",
"default",
"email:send",
map[string]interface{}{
"address": map[string]string{"line": "123 Main St", "city": "Boston", "state": "MA"},
"names": []string{"bob", "mike", "rob"}},
"asynq:{default}:unique:email:send:address=map[city:Boston line:123 Main St state:MA],names=[bob mike rob]",
},
{
"with complex types",
"default",
"email:send",
map[string]interface{}{
"time": time.Date(2020, time.July, 28, 0, 0, 0, 0, time.UTC),
"duration": time.Hour},
"asynq:{default}:unique:email:send:duration=1h0m0s,time=2020-07-28 00:00:00 +0000 UTC",
},
{
"with nil payload",
"default",
"reindex",
nil,
"asynq:{default}:unique:reindex:nil",
},
}
for _, tc := range tests {
got := UniqueKey(tc.qname, tc.tasktype, tc.payload)
if got != tc.want {
t.Errorf("%s: UniqueKey(%q, %q, %v) = %q, want %q", tc.desc, tc.qname, tc.tasktype, tc.payload, got, tc.want)
}
}
}
func TestMessageEncoding(t *testing.T) {
id := uuid.New()
tests := []struct {
@@ -363,14 +166,14 @@ func TestStatusConcurrentAccess(t *testing.T) {
go func() {
defer wg.Done()
status.Get()
_ = status.String()
status.String()
}()
wg.Add(1)
go func() {
defer wg.Done()
status.Set(StatusStopped)
_ = status.String()
status.String()
}()
wg.Wait()


@@ -0,0 +1,41 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package rdb
import (
"testing"
"github.com/go-redis/redis/v7"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base"
)
func BenchmarkDone(b *testing.B) {
r := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
DB: 8,
})
h.FlushDB(b, r)
// populate in-progress queue with messages
var inProgress []*base.TaskMessage
for i := 0; i < 40; i++ {
inProgress = append(inProgress,
h.NewTaskMessage("send_email", map[string]interface{}{"subject": "hello", "recipient_id": 123}))
}
h.SeedInProgressQueue(b, r, inProgress)
rdb := NewRDB(r)
b.ResetTimer()
for n := 0; n < b.N; n++ {
b.StopTimer()
msg := h.NewTaskMessage("reindex", map[string]interface{}{"config": "path/to/config/file"})
r.LPush(base.InProgressQueue, h.MustMarshal(b, msg))
b.StartTimer()
rdb.Done(msg)
}
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -32,11 +32,11 @@ const statsTTL = 90 * 24 * time.Hour // 90 days
// RDB is a client interface to query and mutate task queues.
type RDB struct {
client redis.UniversalClient
client *redis.Client
}
// NewRDB returns a new instance of RDB.
func NewRDB(client redis.UniversalClient) *RDB {
func NewRDB(client *redis.Client) *RDB {
return &RDB{client}
}
@@ -45,10 +45,13 @@ func (r *RDB) Close() error {
return r.client.Close()
}
// Ping checks the connection with redis server.
func (r *RDB) Ping() error {
return r.client.Ping().Err()
}
// KEYS[1] -> asynq:queues:<qname>
// KEYS[2] -> asynq:queues
// ARGV[1] -> task message data
var enqueueCmd = redis.NewScript(`
redis.call("LPUSH", KEYS[1], ARGV[1])
redis.call("SADD", KEYS[2], KEYS[1])
return 1`)
// Enqueue inserts the given task to the tail of the queue.
func (r *RDB) Enqueue(msg *base.TaskMessage) error {
@@ -56,15 +59,13 @@ func (r *RDB) Enqueue(msg *base.TaskMessage) error {
if err != nil {
return err
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
return err
}
key := base.QueueKey(msg.Queue)
return r.client.LPush(key, encoded).Err()
return enqueueCmd.Run(r.client, []string{key, base.AllQueues}, encoded).Err()
}
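As an illustration outside the diff, a hedged sketch of calling RDB.Enqueue directly with a hand-built message. The fragment is assumed to sit inside some setup function; the variable r is assumed to be a *RDB from NewRDB, the uuid and log imports are assumed, and the field values are made up for the example.

msg := &base.TaskMessage{
	ID:      uuid.New(),
	Type:    "email:welcome",
	Payload: map[string]interface{}{"user_id": 42},
	Queue:   base.DefaultQueueName,
	Retry:   25,
	Timeout: 1800, // seconds; the dequeue script rejects messages with neither Timeout nor Deadline set
}
if err := r.Enqueue(msg); err != nil {
	log.Fatalf("could not enqueue task: %v", err)
}
// The script LPUSHes the encoded message onto asynq:queues:default and adds
// that key to the asynq:queues set in a single call.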
// KEYS[1] -> unique key
// KEYS[2] -> asynq:{<qname>}
// KEYS[1] -> unique key in the form <type>:<payload>:<qname>
// KEYS[2] -> asynq:queues:<qname>
// KEYS[2] -> asynq:queues
// ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> task message data
@@ -74,6 +75,7 @@ if not ok then
return 0
end
redis.call("LPUSH", KEYS[2], ARGV[3])
redis.call("SADD", KEYS[3], KEYS[2])
return 1
`)
@@ -84,11 +86,9 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
if err != nil {
return err
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
return err
}
key := base.QueueKey(msg.Queue)
res, err := enqueueUniqueCmd.Run(r.client,
[]string{msg.UniqueKey, base.QueueKey(msg.Queue)},
[]string{msg.UniqueKey, key, base.AllQueues},
msg.ID.String(), int(ttl.Seconds()), encoded).Result()
if err != nil {
return err
@@ -108,7 +108,14 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
// Dequeue skips a queue if the queue is paused.
// If all queues are empty, ErrNoProcessableTask error is returned.
func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Time, err error) {
data, d, err := r.dequeue(qnames...)
var qkeys []interface{}
for _, q := range qnames {
qkeys = append(qkeys, base.QueueKey(q))
}
data, d, err := r.dequeue(qkeys...)
if err == redis.Nil {
return nil, time.Time{}, ErrNoProcessableTask
}
if err != nil {
return nil, time.Time{}, err
}
@@ -118,19 +125,21 @@ func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Ti
return msg, time.Unix(d, 0), nil
}
// KEYS[1] -> asynq:{<qname>}
// KEYS[2] -> asynq:{<qname>}:paused
// KEYS[3] -> asynq:{<qname>}:active
// KEYS[4] -> asynq:{<qname>}:deadlines
// KEYS[1] -> asynq:in_progress
// KEYS[2] -> asynq:paused
// KEYS[3] -> asynq:deadlines
// ARGV[1] -> current time in Unix time
// ARGV[2:] -> List of queues to query in order
//
// dequeueCmd checks whether a queue is paused first, before
// calling RPOPLPUSH to pop a task from the queue.
// It computes the task deadline by inspecting the Timeout and Deadline fields,
// and inserts the task into the deadlines set.
var dequeueCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[2]) == 0 then
local msg = redis.call("RPOPLPUSH", KEYS[1], KEYS[3])
for i = 2, table.getn(ARGV) do
local qkey = ARGV[i]
if redis.call("SISMEMBER", KEYS[2], qkey) == 0 then
local msg = redis.call("RPOPLPUSH", qkey, KEYS[1])
if msg then
local decoded = cjson.decode(msg)
local timeout = decoded["Timeout"]
@@ -145,24 +154,20 @@ if redis.call("EXISTS", KEYS[2]) == 0 then
else
return redis.error_reply("asynq internal error: both timeout and deadline are not set")
end
redis.call("ZADD", KEYS[4], score, msg)
redis.call("ZADD", KEYS[3], score, msg)
return {msg, score}
end
end
end
return nil`)
func (r *RDB) dequeue(qnames ...string) (msgjson string, deadline int64, err error) {
for _, qname := range qnames {
keys := []string{
base.QueueKey(qname),
base.PausedKey(qname),
base.ActiveKey(qname),
base.DeadlinesKey(qname),
}
res, err := dequeueCmd.Run(r.client, keys, time.Now().Unix()).Result()
if err == redis.Nil {
continue
} else if err != nil {
func (r *RDB) dequeue(qkeys ...interface{}) (msgjson string, deadline int64, err error) {
var args []interface{}
args = append(args, time.Now().Unix())
args = append(args, qkeys...)
res, err := dequeueCmd.Run(r.client,
[]string{base.InProgressQueue, base.PausedQueues, base.KeyDeadlines}, args...).Result()
if err != nil {
return "", 0, err
}
data, err := cast.ToSliceE(res)
@@ -180,14 +185,15 @@ func (r *RDB) dequeue(qnames ...string) (msgjson string, deadline int64, err err
}
return msgjson, deadline, nil
}
return "", 0, ErrNoProcessableTask
}
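To make the script above easier to follow, here is a rough Go rendering (an illustration, not code from this diff) of how the dequeue script derives the score it stores in the deadlines set. The branch where both fields are set is elided from the hunk, so taking the earlier of the two values is an assumption; errors and time are standard library imports.

func deadlineScore(now time.Time, timeout, deadline int64) (int64, error) {
	if timeout == 0 && deadline == 0 {
		return 0, errors.New("asynq internal error: both timeout and deadline are not set")
	}
	score := deadline // deadline is already a Unix timestamp
	if timeout != 0 {
		t := now.Unix() + timeout // timeout is a duration in seconds
		// Assumption: when both are set, the earlier of the two wins.
		if score == 0 || t < score {
			score = t
		}
	}
	return score, nil
}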
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[1] -> asynq:in_progress
// KEYS[2] -> asynq:deadlines
// KEYS[3] -> asynq:processed:<yyyy-mm-dd>
// KEYS[4] -> unique key in the format <type>:<payload>:<qname>
// ARGV[1] -> base.TaskMessage value
// ARGV[2] -> stats expiration timestamp
// ARGV[3] -> task ID
// Note: LREM count ZERO means "remove all elements equal to val"
var doneCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
@@ -199,34 +205,13 @@ local n = redis.call("INCR", KEYS[3])
if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[3], ARGV[2])
end
return redis.status_reply("OK")
`)
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[4] -> unique key
// ARGV[1] -> base.TaskMessage value
// ARGV[2] -> stats expiration timestamp
// ARGV[3] -> task ID
var doneUniqueCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
local n = redis.call("INCR", KEYS[3])
if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[3], ARGV[2])
end
if redis.call("GET", KEYS[4]) == ARGV[3] then
if string.len(KEYS[4]) > 0 and redis.call("GET", KEYS[4]) == ARGV[3] then
redis.call("DEL", KEYS[4])
end
return redis.status_reply("OK")
`)
// Done removes the task from active queue to mark the task as done.
// Done removes the task from in-progress queue to mark the task as done.
// It removes a uniqueness lock acquired by the task, if any.
func (r *RDB) Done(msg *base.TaskMessage) error {
encoded, err := base.EncodeMessage(msg)
@@ -234,24 +219,16 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
return err
}
now := time.Now()
processedKey := base.ProcessedKey(now)
expireAt := now.Add(statsTTL)
keys := []string{
base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue),
base.ProcessedKey(msg.Queue, now),
}
args := []interface{}{encoded, expireAt.Unix()}
if len(msg.UniqueKey) > 0 {
keys = append(keys, msg.UniqueKey)
args = append(args, msg.ID.String())
return doneUniqueCmd.Run(r.client, keys, args...).Err()
}
return doneCmd.Run(r.client, keys, args...).Err()
return doneCmd.Run(r.client,
[]string{base.InProgressQueue, base.KeyDeadlines, processedKey, msg.UniqueKey},
encoded, expireAt.Unix(), msg.ID.String()).Err()
}
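A small usage sketch (not part of the diff) of the dequeue/done round trip. It assumes a *RDB value named r and an enclosing worker function that returns an error.

msg, deadline, err := r.Dequeue(base.DefaultQueueName)
if err != nil {
	return err
}
_ = deadline // the worker must finish (or give up) before this time
// ... process the task here ...
if err := r.Done(msg); err != nil {
	return err
}
// Done removes msg from asynq:in_progress and asynq:deadlines, increments
// today's asynq:processed:<yyyy-mm-dd> counter (expiring after 90 days), and
// clears msg.UniqueKey if one was set.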
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}
// KEYS[1] -> asynq:in_progress
// KEYS[2] -> asynq:deadlines
// KEYS[3] -> asynq:queues:<qname>
// ARGV[1] -> base.TaskMessage value
// Note: Use RPUSH to push to the head of the queue.
var requeueCmd = redis.NewScript(`
@@ -264,42 +241,56 @@ end
redis.call("RPUSH", KEYS[3], ARGV[1])
return redis.status_reply("OK")`)
// Requeue moves the task from active queue to the specified queue.
// Requeue moves the task from in-progress queue to the specified queue.
func (r *RDB) Requeue(msg *base.TaskMessage) error {
encoded, err := base.EncodeMessage(msg)
if err != nil {
return err
}
return requeueCmd.Run(r.client,
[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.QueueKey(msg.Queue)},
[]string{base.InProgressQueue, base.KeyDeadlines, base.QueueKey(msg.Queue)},
encoded).Err()
}
// KEYS[1] -> asynq:scheduled
// KEYS[2] -> asynq:queues
// ARGV[1] -> score (process_at timestamp)
// ARGV[2] -> task message
// ARGV[3] -> queue key
var scheduleCmd = redis.NewScript(`
redis.call("ZADD", KEYS[1], ARGV[1], ARGV[2])
redis.call("SADD", KEYS[2], ARGV[3])
return 1
`)
// Schedule adds the task to the backlog queue to be processed in the future.
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
encoded, err := base.EncodeMessage(msg)
if err != nil {
return err
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
return err
}
qkey := base.QueueKey(msg.Queue)
score := float64(processAt.Unix())
return r.client.ZAdd(base.ScheduledKey(msg.Queue), &redis.Z{Score: score, Member: encoded}).Err()
return scheduleCmd.Run(r.client,
[]string{base.ScheduledQueue, base.AllQueues},
score, encoded, qkey).Err()
}
// KEYS[1] -> unique key
// KEYS[2] -> asynq:{<qname>}:scheduled
// KEYS[1] -> unique key in the format <type>:<payload>:<qname>
// KEYS[2] -> asynq:scheduled
// KEYS[3] -> asynq:queues
// ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> score (process_at timestamp)
// ARGV[4] -> task message
// ARGV[5] -> queue key
var scheduleUniqueCmd = redis.NewScript(`
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
if not ok then
return 0
end
redis.call("ZADD", KEYS[2], ARGV[3], ARGV[4])
redis.call("SADD", KEYS[3], ARGV[5])
return 1
`)
@@ -310,13 +301,11 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
if err != nil {
return err
}
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
return err
}
qkey := base.QueueKey(msg.Queue)
score := float64(processAt.Unix())
res, err := scheduleUniqueCmd.Run(r.client,
[]string{msg.UniqueKey, base.ScheduledKey(msg.Queue)},
msg.ID.String(), int(ttl.Seconds()), score, encoded).Result()
[]string{msg.UniqueKey, base.ScheduledQueue, base.AllQueues},
msg.ID.String(), int(ttl.Seconds()), score, encoded, qkey).Result()
if err != nil {
return err
}
@@ -330,12 +319,12 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
return nil
}
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:retry
// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
// ARGV[1] -> base.TaskMessage value to remove from base.ActiveQueue queue
// KEYS[1] -> asynq:in_progress
// KEYS[2] -> asynq:deadlines
// KEYS[3] -> asynq:retry
// KEYS[4] -> asynq:processed:<yyyy-mm-dd>
// KEYS[5] -> asynq:failure:<yyyy-mm-dd>
// ARGV[1] -> base.TaskMessage value to remove from base.InProgressQueue queue
// ARGV[2] -> base.TaskMessage value to add to Retry queue
// ARGV[3] -> retry_at UNIX timestamp
// ARGV[4] -> stats expiration timestamp
@@ -357,7 +346,7 @@ if tonumber(m) == 1 then
end
return redis.status_reply("OK")`)
// Retry moves the task from active to retry queue, incrementing retry count
// Retry moves the task from in-progress to retry queue, incrementing retry count
// and assigning error message to the task message.
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
msgToRemove, err := base.EncodeMessage(msg)
@@ -372,31 +361,31 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) e
return err
}
now := time.Now()
processedKey := base.ProcessedKey(msg.Queue, now)
failedKey := base.FailedKey(msg.Queue, now)
processedKey := base.ProcessedKey(now)
failureKey := base.FailureKey(now)
expireAt := now.Add(statsTTL)
return retryCmd.Run(r.client,
[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.RetryKey(msg.Queue), processedKey, failedKey},
[]string{base.InProgressQueue, base.KeyDeadlines, base.RetryQueue, processedKey, failureKey},
msgToRemove, msgToAdd, processAt.Unix(), expireAt.Unix()).Err()
}
const (
maxArchiveSize = 10000 // maximum number of tasks in archive
archivedExpirationInDays = 90 // number of days before an archived task gets deleted permanently
maxDeadTasks = 10000
deadExpirationInDays = 90
)
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:archived
// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
// ARGV[1] -> base.TaskMessage value to remove
// ARGV[2] -> base.TaskMessage value to add
// KEYS[1] -> asynq:in_progress
// KEYS[2] -> asynq:deadlines
// KEYS[3] -> asynq:dead
// KEYS[4] -> asynq:processed:<yyyy-mm-dd>
// KEYS[5] -> asynq:failure:<yyyy-mm-dd>
// ARGV[1] -> base.TaskMessage value to remove from base.InProgressQueue queue
// ARGV[2] -> base.TaskMessage value to add to Dead queue
// ARGV[3] -> died_at UNIX timestamp
// ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
// ARGV[5] -> max number of tasks in archive (e.g., 100)
// ARGV[5] -> max number of tasks in dead queue (e.g., 100)
// ARGV[6] -> stats expiration timestamp
var archiveCmd = redis.NewScript(`
var killCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND")
end
@@ -416,9 +405,10 @@ if tonumber(m) == 1 then
end
return redis.status_reply("OK")`)
// Archive sends the given task to archive, attaching the error message to the task.
// It also trims the archive by timestamp and set size.
func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
// Kill sends the task to "dead" queue from in-progress queue, assigning
// the error message to the task.
// It also trims the set by timestamp and set size.
func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
msgToRemove, err := base.EncodeMessage(msg)
if err != nil {
return err
@@ -430,74 +420,65 @@ func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
return err
}
now := time.Now()
limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
processedKey := base.ProcessedKey(msg.Queue, now)
failedKey := base.FailedKey(msg.Queue, now)
limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
processedKey := base.ProcessedKey(now)
failureKey := base.FailureKey(now)
expireAt := now.Add(statsTTL)
return archiveCmd.Run(r.client,
[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.ArchivedKey(msg.Queue), processedKey, failedKey},
msgToRemove, msgToAdd, now.Unix(), limit, maxArchiveSize, expireAt.Unix()).Err()
return killCmd.Run(r.client,
[]string{base.InProgressQueue, base.KeyDeadlines, base.DeadQueue, processedKey, failureKey},
msgToRemove, msgToAdd, now.Unix(), limit, maxDeadTasks, expireAt.Unix()).Err()
}
// CheckAndEnqueue checks for scheduled/retry tasks for the given queues
// and enqueues any tasks that are ready to be processed.
func (r *RDB) CheckAndEnqueue(qnames ...string) error {
for _, qname := range qnames {
if err := r.forwardAll(base.ScheduledKey(qname), base.QueueKey(qname)); err != nil {
// CheckAndEnqueue checks for all scheduled/retry tasks and enqueues any tasks that
// are ready to be processed.
func (r *RDB) CheckAndEnqueue() (err error) {
delayed := []string{base.ScheduledQueue, base.RetryQueue}
for _, zset := range delayed {
n := 1
for n != 0 {
n, err = r.forward(zset)
if err != nil {
return err
}
if err := r.forwardAll(base.RetryKey(qname), base.QueueKey(qname)); err != nil {
return err
}
}
return nil
}
// KEYS[1] -> source queue (e.g. asynq:{<qname>}:scheduled or asynq:{<qname>}:retry)
// KEYS[2] -> destination queue (e.g. asynq:{<qname>})
// KEYS[1] -> source queue (e.g. scheduled or retry queue)
// ARGV[1] -> current unix time
// ARGV[2] -> queue prefix
// Note: Script moves tasks up to 100 at a time to keep the runtime of script short.
var forwardCmd = redis.NewScript(`
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, 100)
for _, msg in ipairs(msgs) do
redis.call("LPUSH", KEYS[2], msg)
local decoded = cjson.decode(msg)
local qkey = ARGV[2] .. decoded["Queue"]
redis.call("LPUSH", qkey, msg)
redis.call("ZREM", KEYS[1], msg)
end
return table.getn(msgs)`)
// forward moves tasks with a score less than the current unix time
// from the src zset to the dst list. It returns the number of tasks moved.
func (r *RDB) forward(src, dst string) (int, error) {
// from the src zset. It returns the number of tasks moved.
func (r *RDB) forward(src string) (int, error) {
now := float64(time.Now().Unix())
res, err := forwardCmd.Run(r.client, []string{src, dst}, now).Result()
res, err := forwardCmd.Run(r.client,
[]string{src}, now, base.QueuePrefix).Result()
if err != nil {
return 0, err
}
return cast.ToInt(res), nil
}
// forwardAll moves tasks with a score less than the current unix time from the src zset,
// until there's no more tasks.
func (r *RDB) forwardAll(src, dst string) (err error) {
n := 1
for n != 0 {
n, err = r.forward(src, dst)
if err != nil {
return err
}
}
return nil
}
// ListDeadlineExceeded returns a list of task messages that have exceeded the deadline from the given queues.
func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
// ListDeadlineExceeded returns a list of task messages that have exceeded the given deadline.
func (r *RDB) ListDeadlineExceeded(deadline time.Time) ([]*base.TaskMessage, error) {
var msgs []*base.TaskMessage
opt := &redis.ZRangeBy{
Min: "-inf",
Max: strconv.FormatInt(deadline.Unix(), 10),
}
for _, qname := range qnames {
res, err := r.client.ZRangeByScore(base.DeadlinesKey(qname), opt).Result()
res, err := r.client.ZRangeByScore(base.KeyDeadlines, opt).Result()
if err != nil {
return nil, err
}
@@ -508,24 +489,28 @@ func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*bas
}
msgs = append(msgs, msg)
}
}
return msgs, nil
}
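As a hedged sketch of how a recovery loop could consume this API (the loop itself is not shown in this diff), retrying or killing tasks whose deadlines have passed. It assumes an enclosing function with no return values, a *RDB value named r, and the standard library log package.

msgs, err := r.ListDeadlineExceeded(time.Now())
if err != nil {
	log.Printf("could not list deadline-exceeded tasks: %v", err)
	return
}
const errMsg = "deadline exceeded"
for _, msg := range msgs {
	if msg.Retried >= msg.Retry {
		err = r.Kill(msg, errMsg)
	} else {
		err = r.Retry(msg, time.Now(), errMsg)
	}
	if err != nil {
		log.Printf("could not recover task id=%s: %v", msg.ID, err)
	}
}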
// KEYS[1] -> asynq:servers:{<host:pid:sid>}
// KEYS[2] -> asynq:workers:{<host:pid:sid>}
// ARGV[1] -> TTL in seconds
// ARGV[2] -> server info
// ARGV[3:] -> alternate key-value pair of (worker id, worker data)
// KEYS[1] -> asynq:servers:<host:pid:sid>
// KEYS[2] -> asynq:servers
// KEYS[3] -> asynq:workers:<host:pid:sid>
// KEYS[4] -> asynq:workers
// ARGV[1] -> expiration time
// ARGV[2] -> TTL in seconds
// ARGV[3] -> server info
// ARGV[4:] -> alternate key-value pair of (worker id, worker data)
// Note: Add key to ZSET with expiration time as score.
// ref: https://github.com/antirez/redis/issues/135#issuecomment-2361996
var writeServerStateCmd = redis.NewScript(`
redis.call("SETEX", KEYS[1], ARGV[1], ARGV[2])
redis.call("DEL", KEYS[2])
for i = 3, table.getn(ARGV)-1, 2 do
redis.call("HSET", KEYS[2], ARGV[i], ARGV[i+1])
redis.call("SETEX", KEYS[1], ARGV[2], ARGV[3])
redis.call("ZADD", KEYS[2], ARGV[1], KEYS[1])
redis.call("DEL", KEYS[3])
for i = 4, table.getn(ARGV)-1, 2 do
redis.call("HSET", KEYS[3], ARGV[i], ARGV[i+1])
end
redis.call("EXPIRE", KEYS[2], ARGV[1])
redis.call("EXPIRE", KEYS[3], ARGV[2])
redis.call("ZADD", KEYS[4], ARGV[1], KEYS[3])
return redis.status_reply("OK")`)
// WriteServerState writes server state data to redis with expiration set to the value ttl.
@@ -535,7 +520,7 @@ func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo
return err
}
exp := time.Now().Add(ttl).UTC()
args := []interface{}{ttl.Seconds(), bytes} // args to the lua script
args := []interface{}{float64(exp.Unix()), ttl.Seconds(), bytes} // args to the lua script
for _, w := range workers {
bytes, err := json.Marshal(w)
if err != nil {
@@ -545,72 +530,28 @@ func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo
}
skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
if err := r.client.ZAdd(base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
return err
}
if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
return err
}
return writeServerStateCmd.Run(r.client, []string{skey, wkey}, args...).Err()
return writeServerStateCmd.Run(r.client,
[]string{skey, base.AllServers, wkey, base.AllWorkers},
args...).Err()
}
// KEYS[1] -> asynq:servers:{<host:pid:sid>}
// KEYS[2] -> asynq:workers:{<host:pid:sid>}
// KEYS[1] -> asynq:servers
// KEYS[2] -> asynq:servers:<host:pid:sid>
// KEYS[3] -> asynq:workers
// KEYS[4] -> asynq:workers:<host:pid:sid>
var clearServerStateCmd = redis.NewScript(`
redis.call("DEL", KEYS[1])
redis.call("ZREM", KEYS[1], KEYS[2])
redis.call("DEL", KEYS[2])
redis.call("ZREM", KEYS[3], KEYS[4])
redis.call("DEL", KEYS[4])
return redis.status_reply("OK")`)
// ClearServerState deletes server state data from redis.
func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
skey := base.ServerInfoKey(host, pid, serverID)
wkey := base.WorkersKey(host, pid, serverID)
if err := r.client.ZRem(base.AllServers, skey).Err(); err != nil {
return err
}
if err := r.client.ZRem(base.AllWorkers, wkey).Err(); err != nil {
return err
}
return clearServerStateCmd.Run(r.client, []string{skey, wkey}).Err()
}
// KEYS[1] -> asynq:schedulers:{<schedulerID>}
// ARGV[1] -> TTL in seconds
// ARGV[2:] -> scheduler entries
var writeSchedulerEntriesCmd = redis.NewScript(`
redis.call("DEL", KEYS[1])
for i = 2, #ARGV do
redis.call("LPUSH", KEYS[1], ARGV[i])
end
redis.call("EXPIRE", KEYS[1], ARGV[1])
return redis.status_reply("OK")`)
// WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl.
func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
args := []interface{}{ttl.Seconds()}
for _, e := range entries {
bytes, err := json.Marshal(e)
if err != nil {
continue // skip bad data
}
args = append(args, bytes)
}
exp := time.Now().Add(ttl).UTC()
key := base.SchedulerEntriesKey(schedulerID)
err := r.client.ZAdd(base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
if err != nil {
return err
}
return writeSchedulerEntriesCmd.Run(r.client, []string{key}, args...).Err()
}
// ClearSchedulerEntries deletes scheduler entries data from redis.
func (r *RDB) ClearSchedulerEntries(schedulerID string) error {
key := base.SchedulerEntriesKey(schedulerID)
if err := r.client.ZRem(base.AllSchedulers, key).Err(); err != nil {
return err
}
return r.client.Del(key).Err()
return clearServerStateCmd.Run(r.client,
[]string{base.AllServers, skey, base.AllWorkers, wkey}).Err()
}
// CancelationPubSub returns a pubsub for cancelation messages.
@@ -628,32 +569,3 @@ func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
func (r *RDB) PublishCancelation(id string) error {
return r.client.Publish(base.CancelChannel, id).Err()
}
// KEYS[1] -> asynq:scheduler_history:<entryID>
// ARGV[1] -> enqueued_at timestamp
// ARGV[2] -> serialized SchedulerEnqueueEvent data
// ARGV[3] -> max number of events to be persisted
var recordSchedulerEnqueueEventCmd = redis.NewScript(`
redis.call("ZREMRANGEBYRANK", KEYS[1], 0, -ARGV[3])
redis.call("ZADD", KEYS[1], ARGV[1], ARGV[2])
return redis.status_reply("OK")`)
// Maximum number of enqueue events to store per entry.
const maxEvents = 1000
// RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
key := base.SchedulerHistoryKey(entryID)
data, err := json.Marshal(event)
if err != nil {
return err
}
return recordSchedulerEnqueueEventCmd.Run(
r.client, []string{key}, event.EnqueuedAt.Unix(), data, maxEvents).Err()
}
// ClearSchedulerHistory deletes the enqueue event history for the given scheduler entry.
func (r *RDB) ClearSchedulerHistory(entryID string) error {
key := base.SchedulerHistoryKey(entryID)
return r.client.Del(key).Err()
}

File diff suppressed because it is too large.


@@ -117,31 +117,31 @@ func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg s
return tb.real.Retry(msg, processAt, errMsg)
}
func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error {
func (tb *TestBroker) Kill(msg *base.TaskMessage, errMsg string) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.Archive(msg, errMsg)
return tb.real.Kill(msg, errMsg)
}
func (tb *TestBroker) CheckAndEnqueue(qnames ...string) error {
func (tb *TestBroker) CheckAndEnqueue() error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.CheckAndEnqueue(qnames...)
return tb.real.CheckAndEnqueue()
}
func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time) ([]*base.TaskMessage, error) {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return nil, errRedisDown
}
return tb.real.ListDeadlineExceeded(deadline, qnames...)
return tb.real.ListDeadlineExceeded(deadline)
}
func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
@@ -180,15 +180,6 @@ func (tb *TestBroker) PublishCancelation(id string) error {
return tb.real.PublishCancelation(id)
}
func (tb *TestBroker) Ping() error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.Ping()
}
func (tb *TestBroker) Close() error {
tb.mu.Lock()
defer tb.mu.Unlock()


@@ -44,16 +44,6 @@ func toInt(v interface{}) (int, error) {
}
}
// String returns a string representation of payload data.
func (p Payload) String() string {
return fmt.Sprint(p.data)
}
// MarshalJSON returns the JSON encoding of payload data.
func (p Payload) MarshalJSON() ([]byte, error) {
return json.Marshal(p.data)
}
// GetString returns a string value if a string type is associated with
// the key, otherwise reports an error.
func (p Payload) GetString(key string) (string, error) {


@@ -6,7 +6,6 @@ package asynq
import (
"encoding/json"
"fmt"
"testing"
"time"
@@ -646,30 +645,3 @@ func TestPayloadHas(t *testing.T) {
t.Errorf("Payload.Has(%q) = true, want false", "name")
}
}
func TestPayloadDebuggingStrings(t *testing.T) {
data := map[string]interface{}{
"foo": 123,
"bar": "hello",
"baz": false,
}
payload := Payload{data: data}
if payload.String() != fmt.Sprint(data) {
t.Errorf("Payload.String() = %q, want %q",
payload.String(), fmt.Sprint(data))
}
got, err := payload.MarshalJSON()
if err != nil {
t.Fatal(err)
}
want, err := json.Marshal(data)
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("Payload.MarhsalJSON() = %s, want %s; (-want,+got)\n%s",
got, want, diff)
}
}


@@ -6,13 +6,9 @@ package asynq
import (
"context"
"errors"
"fmt"
"math/rand"
"runtime"
"runtime/debug"
"sort"
"strings"
"sync"
"time"
@@ -33,7 +29,7 @@ type processor struct {
// orderedQueues is set only in strict-priority mode.
orderedQueues []string
retryDelayFunc RetryDelayFunc
retryDelayFunc retryDelayFunc
errHandler ErrorHandler
@@ -60,17 +56,19 @@ type processor struct {
// abort channel communicates to the in-flight worker goroutines to stop.
abort chan struct{}
// cancelations is a set of cancel functions for all active tasks.
// cancelations is a set of cancel functions for all in-progress tasks.
cancelations *base.Cancelations
starting chan<- *workerInfo
starting chan<- *base.TaskMessage
finished chan<- *base.TaskMessage
}
type retryDelayFunc func(n int, err error, task *Task) time.Duration
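For illustration (not from the diff), a function satisfying the retryDelayFunc signature above; the function name and the quadratic backoff policy are just examples.

func exampleRetryDelay(n int, e error, t *Task) time.Duration {
	// Backoff grows quadratically with the retry count n.
	return time.Duration(n*n) * time.Second
}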
type processorParams struct {
logger *log.Logger
broker base.Broker
retryDelayFunc RetryDelayFunc
retryDelayFunc retryDelayFunc
syncCh chan<- *syncRequest
cancelations *base.Cancelations
concurrency int
@@ -78,7 +76,7 @@ type processorParams struct {
strictPriority bool
errHandler ErrorHandler
shutdownTimeout time.Duration
starting chan<- *workerInfo
starting chan<- *base.TaskMessage
finished chan<- *base.TaskMessage
}
@@ -104,7 +102,6 @@ func newProcessor(params processorParams) *processor {
abort: make(chan struct{}),
errHandler: params.errHandler,
handler: HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
shutdownTimeout: params.shutdownTimeout,
starting: params.starting,
finished: params.finished,
}
@@ -180,7 +177,7 @@ func (p *processor) exec() {
return
}
p.starting <- &workerInfo{msg, time.Now(), deadline}
p.starting <- msg
go func() {
defer func() {
p.finished <- msg
@@ -194,19 +191,9 @@ func (p *processor) exec() {
p.cancelations.Delete(msg.ID.String())
}()
// check context before starting a worker goroutine.
select {
case <-ctx.Done():
// already canceled (e.g. deadline exceeded).
p.retryOrKill(ctx, msg, ctx.Err())
return
default:
}
resCh := make(chan error, 1)
go func() {
resCh <- p.perform(ctx, NewTask(msg.Type, msg.Payload))
}()
task := NewTask(msg.Type, msg.Payload)
go func() { resCh <- perform(ctx, task, p.handler) }()
select {
case <-p.abort:
@@ -215,14 +202,18 @@ func (p *processor) exec() {
p.requeue(msg)
return
case <-ctx.Done():
p.logger.Debugf("Retrying task. task id=%s", msg.ID) // TODO: Improve this log message and above
p.retryOrKill(ctx, msg, ctx.Err())
return
case resErr := <-resCh:
// Note: One of three things should happen.
// 1) Done -> Removes the message from Active
// 2) Retry -> Removes the message from Active & Adds the message to Retry
// 3) Archive -> Removes the message from Active & Adds the message to archive
// 1) Done -> Removes the message from InProgress
// 2) Retry -> Removes the message from InProgress & Adds the message to Retry
// 3) Kill -> Removes the message from InProgress & Adds the message to Dead
if resErr != nil {
if p.errHandler != nil {
p.errHandler.HandleError(ctx, task, resErr)
}
p.retryOrKill(ctx, msg, resErr)
return
}
@@ -244,7 +235,7 @@ func (p *processor) requeue(msg *base.TaskMessage) {
func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
err := p.broker.Done(msg)
if err != nil {
errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.InProgressQueue, err)
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
@@ -260,17 +251,9 @@ func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
}
}
// SkipRetry is used as a return value from Handler.ProcessTask to indicate that
// the task should not be retried and should be archived instead.
var SkipRetry = errors.New("skip retry for the task")
func (p *processor) retryOrKill(ctx context.Context, msg *base.TaskMessage, err error) {
if p.errHandler != nil {
p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
}
if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) {
p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
p.archive(ctx, msg, err)
if msg.Retried >= msg.Retry {
p.kill(ctx, msg, err)
} else {
p.retry(ctx, msg, err)
}
@@ -281,7 +264,7 @@ func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
retryAt := time.Now().Add(d)
err := p.broker.Retry(msg, retryAt, e.Error())
if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.InProgressQueue, base.RetryQueue)
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
@@ -297,10 +280,11 @@ func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
}
}
func (p *processor) archive(ctx context.Context, msg *base.TaskMessage, e error) {
err := p.broker.Archive(msg, e.Error())
func (p *processor) kill(ctx context.Context, msg *base.TaskMessage, e error) {
p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
err := p.broker.Kill(msg, e.Error())
if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue))
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.InProgressQueue, base.DeadQueue)
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
@@ -308,7 +292,7 @@ func (p *processor) archive(ctx context.Context, msg *base.TaskMessage, e error)
p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{
fn: func() error {
return p.broker.Archive(msg, e.Error())
return p.broker.Kill(msg, e.Error())
},
errMsg: errMsg,
deadline: deadline,
@@ -346,26 +330,13 @@ func (p *processor) queues() []string {
// perform calls the handler with the given task.
// If the call returns without panic, it simply returns the value,
// otherwise, it recovers from panic and returns an error.
func (p *processor) perform(ctx context.Context, task *Task) (err error) {
func perform(ctx context.Context, task *Task, h Handler) (err error) {
defer func() {
if x := recover(); x != nil {
p.logger.Errorf("recovering from panic. See the stack trace below for details:\n%s", string(debug.Stack()))
_, file, line, ok := runtime.Caller(1) // skip the first frame (panic itself)
if ok && strings.Contains(file, "runtime/") {
// The panic came from the runtime, most likely due to incorrect
// map/slice usage. The parent frame should have the real trigger.
_, file, line, ok = runtime.Caller(2)
}
// Include the file and line number info in the error, if runtime.Caller returned ok.
if ok {
err = fmt.Errorf("panic [%s:%d]: %v", file, line, x)
} else {
err = fmt.Errorf("panic: %v", x)
}
}
}()
return p.handler.ProcessTask(ctx, task)
return h.ProcessTask(ctx, task)
}
// uniq dedupes elements and returns a slice of unique names of length l.


@@ -20,7 +20,7 @@ import (
)
// fakeHeartbeater receives from starting and finished channels and do nothing.
func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) {
func fakeHeartbeater(starting, finished <-chan *base.TaskMessage, done <-chan struct{}) {
for {
select {
case <-starting:
@@ -42,14 +42,14 @@ func fakeSyncer(syncCh <-chan *syncRequest, done <-chan struct{}) {
}
}
func TestProcessorSuccessWithSingleQueue(t *testing.T) {
func TestProcessorSuccess(t *testing.T) {
r := setup(t)
rdbClient := rdb.NewRDB(r)
m1 := h.NewTaskMessage("task1", nil)
m2 := h.NewTaskMessage("task2", nil)
m3 := h.NewTaskMessage("task3", nil)
m4 := h.NewTaskMessage("task4", nil)
m1 := h.NewTaskMessage("send_email", nil)
m2 := h.NewTaskMessage("gen_thumbnail", nil)
m3 := h.NewTaskMessage("reindex", nil)
m4 := h.NewTaskMessage("sync", nil)
t1 := NewTask(m1.Type, m1.Payload)
t2 := NewTask(m2.Type, m2.Payload)
@@ -57,17 +57,17 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
t4 := NewTask(m4.Type, m4.Payload)
tests := []struct {
pending []*base.TaskMessage // initial default queue state
enqueued []*base.TaskMessage // initial default queue state
incoming []*base.TaskMessage // tasks to be enqueued during run
wantProcessed []*Task // tasks to be processed at the end
}{
{
pending: []*base.TaskMessage{m1},
enqueued: []*base.TaskMessage{m1},
incoming: []*base.TaskMessage{m2, m3, m4},
wantProcessed: []*Task{t1, t2, t3, t4},
},
{
pending: []*base.TaskMessage{},
enqueued: []*base.TaskMessage{},
incoming: []*base.TaskMessage{m1},
wantProcessed: []*Task{t1},
},
@@ -75,7 +75,7 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName) // initialize default queue.
h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.
// instantiate a new processor
var mu sync.Mutex
@@ -86,7 +86,7 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
processed = append(processed, task)
return nil
}
starting := make(chan *workerInfo)
starting := make(chan *base.TaskMessage)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
@@ -96,7 +96,7 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
retryDelayFunc: defaultDelayFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
@@ -117,101 +117,9 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
t.Fatal(err)
}
}
time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
}
p.terminate()
mu.Lock()
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Payload{})); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
}
mu.Unlock()
}
}
func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
var (
r = setup(t)
rdbClient = rdb.NewRDB(r)
m1 = h.NewTaskMessage("task1", nil)
m2 = h.NewTaskMessage("task2", nil)
m3 = h.NewTaskMessageWithQueue("task3", nil, "high")
m4 = h.NewTaskMessageWithQueue("task4", nil, "low")
t1 = NewTask(m1.Type, m1.Payload)
t2 = NewTask(m2.Type, m2.Payload)
t3 = NewTask(m3.Type, m3.Payload)
t4 = NewTask(m4.Type, m4.Payload)
)
tests := []struct {
pending map[string][]*base.TaskMessage
queues []string // list of queues to consume the tasks from
wantProcessed []*Task // tasks to be processed at the end
}{
{
pending: map[string][]*base.TaskMessage{
"default": {m1, m2},
"high": {m3},
"low": {m4},
},
queues: []string{"default", "high", "low"},
wantProcessed: []*Task{t1, t2, t3, t4},
},
}
for _, tc := range tests {
// Set up test case.
h.FlushDB(t, r)
h.SeedAllPendingQueues(t, r, tc.pending)
// Instantiate a new processor.
var mu sync.Mutex
var processed []*Task
handler := func(ctx context.Context, task *Task) error {
mu.Lock()
defer mu.Unlock()
processed = append(processed, task)
return nil
}
starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: map[string]int{
"default": 2,
"high": 3,
"low": 1,
},
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = HandlerFunc(handler)
p.start(&sync.WaitGroup{})
// Wait for two seconds to allow all pending tasks to be processed.
time.Sleep(2 * time.Second)
// Make sure no messages are stuck in active list.
for _, qname := range tc.queues {
if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
}
time.Sleep(2 * time.Second) // wait for two seconds to allow all enqueued tasks to be processed.
if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
}
p.terminate()
@@ -232,18 +140,18 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
t1 := NewTask(m1.Type, m1.Payload)
tests := []struct {
pending []*base.TaskMessage // initial default queue state
enqueued []*base.TaskMessage // initial default queue state
wantProcessed []*Task // tasks to be processed at the end
}{
{
pending: []*base.TaskMessage{m1},
enqueued: []*base.TaskMessage{m1},
wantProcessed: []*Task{t1},
},
}
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName) // initialize default queue.
h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.
var mu sync.Mutex
var processed []*Task
@@ -258,7 +166,7 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
processed = append(processed, task)
return nil
}
starting := make(chan *workerInfo)
starting := make(chan *base.TaskMessage)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
@@ -268,7 +176,7 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
retryDelayFunc: defaultDelayFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
@@ -282,9 +190,9 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
p.handler = HandlerFunc(handler)
p.start(&sync.WaitGroup{})
time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
time.Sleep(2 * time.Second) // wait for two seconds to allow all enqueued tasks to be processed.
if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
}
p.terminate()
@@ -307,74 +215,39 @@ func TestProcessorRetry(t *testing.T) {
m4 := h.NewTaskMessage("sync", nil)
errMsg := "something went wrong"
wrappedSkipRetry := fmt.Errorf("%s:%w", errMsg, SkipRetry)
now := time.Now()
tests := []struct {
desc string // test description
pending []*base.TaskMessage // initial default queue state
enqueued []*base.TaskMessage // initial default queue state
incoming []*base.TaskMessage // tasks to be enqueued during run
delay time.Duration // retry delay duration
handler Handler // task handler
wait time.Duration // wait duration between starting and stopping processor for this test case
wantRetry []base.Z // tasks in retry queue at the end
wantArchived []*base.TaskMessage // tasks in archived queue at the end
wantRetry []h.ZSetEntry // tasks in retry queue at the end
wantDead []*base.TaskMessage // tasks in dead queue at the end
wantErrCount int // number of times error handler should be called
}{
{
desc: "Should automatically retry errored tasks",
pending: []*base.TaskMessage{m1, m2},
enqueued: []*base.TaskMessage{m1, m2},
incoming: []*base.TaskMessage{m3, m4},
delay: time.Minute,
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
return fmt.Errorf(errMsg)
}),
wait: 2 * time.Second,
wantRetry: []base.Z{
{Message: h.TaskMessageAfterRetry(*m2, errMsg), Score: now.Add(time.Minute).Unix()},
{Message: h.TaskMessageAfterRetry(*m3, errMsg), Score: now.Add(time.Minute).Unix()},
{Message: h.TaskMessageAfterRetry(*m4, errMsg), Score: now.Add(time.Minute).Unix()},
wantRetry: []h.ZSetEntry{
{Msg: h.TaskMessageAfterRetry(*m2, errMsg), Score: float64(now.Add(time.Minute).Unix())},
{Msg: h.TaskMessageAfterRetry(*m3, errMsg), Score: float64(now.Add(time.Minute).Unix())},
{Msg: h.TaskMessageAfterRetry(*m4, errMsg), Score: float64(now.Add(time.Minute).Unix())},
},
wantArchived: []*base.TaskMessage{h.TaskMessageWithError(*m1, errMsg)},
wantDead: []*base.TaskMessage{h.TaskMessageWithError(*m1, errMsg)},
wantErrCount: 4,
},
{
desc: "Should skip retry errored tasks",
pending: []*base.TaskMessage{m1, m2},
incoming: []*base.TaskMessage{},
delay: time.Minute,
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
return SkipRetry // return SkipRetry without wrapping
}),
wait: 2 * time.Second,
wantRetry: []base.Z{},
wantArchived: []*base.TaskMessage{
h.TaskMessageWithError(*m1, SkipRetry.Error()),
h.TaskMessageWithError(*m2, SkipRetry.Error()),
},
wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
},
{
desc: "Should skip retry errored tasks (with error wrapping)",
pending: []*base.TaskMessage{m1, m2},
incoming: []*base.TaskMessage{},
delay: time.Minute,
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
return wrappedSkipRetry
}),
wait: 2 * time.Second,
wantRetry: []base.Z{},
wantArchived: []*base.TaskMessage{
h.TaskMessageWithError(*m1, wrappedSkipRetry.Error()),
h.TaskMessageWithError(*m2, wrappedSkipRetry.Error()),
},
wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
},
}
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName) // initialize default queue.
h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.
// instantiate a new processor
delayFunc := func(n int, e error, t *Task) time.Duration {
@@ -389,7 +262,7 @@ func TestProcessorRetry(t *testing.T) {
defer mu.Unlock()
n++
}
starting := make(chan *workerInfo)
starting := make(chan *base.TaskMessage)
finished := make(chan *base.TaskMessage)
done := make(chan struct{})
defer func() { close(done) }()
@@ -421,19 +294,19 @@ func TestProcessorRetry(t *testing.T) {
time.Sleep(tc.wait) // FIXME: This makes test flaky.
p.terminate()
cmpOpt := h.EquateInt64Approx(1) // allow up to a second difference in zset score
gotRetry := h.GetRetryEntries(t, r, base.DefaultQueueName)
cmpOpt := cmpopts.EquateApprox(0, float64(time.Second)) // allow up to a second difference in zset score
gotRetry := h.GetRetryEntries(t, r)
if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.RetryKey(base.DefaultQueueName), diff)
t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.RetryQueue, diff)
}
gotDead := h.GetArchivedMessages(t, r, base.DefaultQueueName)
if diff := cmp.Diff(tc.wantArchived, gotDead, h.SortMsgOpt); diff != "" {
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff)
gotDead := h.GetDeadMessages(t, r)
if diff := cmp.Diff(tc.wantDead, gotDead, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.DeadQueue, diff)
}
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%s: %q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), tc.desc, l)
if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
}
if n != tc.wantErrCount {
@@ -470,7 +343,7 @@ func TestProcessorQueues(t *testing.T) {
}
for _, tc := range tests {
starting := make(chan *workerInfo)
starting := make(chan *base.TaskMessage)
finished := make(chan *base.TaskMessage)
done := make(chan struct{})
defer func() { close(done) }()
@@ -478,7 +351,7 @@ func TestProcessorQueues(t *testing.T) {
p := newProcessor(processorParams{
logger: testLogger,
broker: nil,
retryDelayFunc: DefaultRetryDelayFunc,
retryDelayFunc: defaultDelayFunc,
syncCh: nil,
cancelations: base.NewCancelations(),
concurrency: 10,
@@ -498,42 +371,36 @@ func TestProcessorQueues(t *testing.T) {
}
func TestProcessorWithStrictPriority(t *testing.T) {
var (
r = setup(t)
r := setup(t)
rdbClient := rdb.NewRDB(r)
rdbClient = rdb.NewRDB(r)
m1 := h.NewTaskMessage("send_email", nil)
m2 := h.NewTaskMessage("send_email", nil)
m3 := h.NewTaskMessage("send_email", nil)
m4 := h.NewTaskMessage("gen_thumbnail", nil)
m5 := h.NewTaskMessage("gen_thumbnail", nil)
m6 := h.NewTaskMessage("sync", nil)
m7 := h.NewTaskMessage("sync", nil)
m1 = h.NewTaskMessageWithQueue("task1", nil, "critical")
m2 = h.NewTaskMessageWithQueue("task2", nil, "critical")
m3 = h.NewTaskMessageWithQueue("task3", nil, "critical")
m4 = h.NewTaskMessageWithQueue("task4", nil, base.DefaultQueueName)
m5 = h.NewTaskMessageWithQueue("task5", nil, base.DefaultQueueName)
m6 = h.NewTaskMessageWithQueue("task6", nil, "low")
m7 = h.NewTaskMessageWithQueue("task7", nil, "low")
t1 = NewTask(m1.Type, m1.Payload)
t2 = NewTask(m2.Type, m2.Payload)
t3 = NewTask(m3.Type, m3.Payload)
t4 = NewTask(m4.Type, m4.Payload)
t5 = NewTask(m5.Type, m5.Payload)
t6 = NewTask(m6.Type, m6.Payload)
t7 = NewTask(m7.Type, m7.Payload)
)
defer r.Close()
t1 := NewTask(m1.Type, m1.Payload)
t2 := NewTask(m2.Type, m2.Payload)
t3 := NewTask(m3.Type, m3.Payload)
t4 := NewTask(m4.Type, m4.Payload)
t5 := NewTask(m5.Type, m5.Payload)
t6 := NewTask(m6.Type, m6.Payload)
t7 := NewTask(m7.Type, m7.Payload)
tests := []struct {
pending map[string][]*base.TaskMessage // initial queues state
queues []string // list of queues to consume tasks from
enqueued map[string][]*base.TaskMessage // initial queues state
wait time.Duration // wait duration between starting and stopping processor for this test case
wantProcessed []*Task // tasks to be processed at the end
}{
{
pending: map[string][]*base.TaskMessage{
enqueued: map[string][]*base.TaskMessage{
base.DefaultQueueName: {m4, m5},
"critical": {m1, m2, m3},
"low": {m6, m7},
},
queues: []string{base.DefaultQueueName, "critical", "low"},
wait: time.Second,
wantProcessed: []*Task{t1, t2, t3, t4, t5, t6, t7},
},
@@ -541,8 +408,8 @@ func TestProcessorWithStrictPriority(t *testing.T) {
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
for qname, msgs := range tc.pending {
h.SeedPendingQueue(t, r, msgs, qname)
for qname, msgs := range tc.enqueued {
h.SeedEnqueuedQueue(t, r, msgs, qname)
}
// instantiate a new processor
@@ -555,22 +422,20 @@ func TestProcessorWithStrictPriority(t *testing.T) {
return nil
}
queueCfg := map[string]int{
base.DefaultQueueName: 2,
"critical": 3,
base.DefaultQueueName: 2,
"low": 1,
}
starting := make(chan *workerInfo)
starting := make(chan *base.TaskMessage)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
syncCh: syncCh,
retryDelayFunc: defaultDelayFunc,
syncCh: nil,
cancelations: base.NewCancelations(),
concurrency: 1, // Set concurrency to 1 to make sure tasks are processed one at a time.
queues: queueCfg,
@@ -584,22 +449,19 @@ func TestProcessorWithStrictPriority(t *testing.T) {
p.start(&sync.WaitGroup{})
time.Sleep(tc.wait)
// Make sure no tasks are stuck in active list.
for _, qname := range tc.queues {
if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
}
}
p.terminate()
if diff := cmp.Diff(tc.wantProcessed, processed, cmp.AllowUnexported(Payload{})); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
}
if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
}
}
}
func TestProcessorPerform(t *testing.T) {
func TestPerform(t *testing.T) {
tests := []struct {
desc string
handler HandlerFunc
@@ -631,16 +493,9 @@ func TestProcessorPerform(t *testing.T) {
wantErr: true,
},
}
// Note: We don't need to fully initialize the processor since we are only testing
// perform method.
p := newProcessor(processorParams{
logger: testLogger,
queues: defaultQueueConfig,
})
for _, tc := range tests {
p.handler = tc.handler
got := p.perform(context.Background(), tc.task)
got := perform(context.Background(), tc.task, tc.handler)
if !tc.wantErr && got != nil {
t.Errorf("%s: perform() = %v, want nil", tc.desc, got)
continue


@@ -16,14 +16,11 @@ import (
type recoverer struct {
logger *log.Logger
broker base.Broker
retryDelayFunc RetryDelayFunc
retryDelayFunc retryDelayFunc
// channel to communicate back to the long running "recoverer" goroutine.
done chan struct{}
// list of queues to check for deadline.
queues []string
// poll interval.
interval time.Duration
}
@@ -31,9 +28,8 @@ type recoverer struct {
type recovererParams struct {
logger *log.Logger
broker base.Broker
queues []string
interval time.Duration
retryDelayFunc RetryDelayFunc
retryDelayFunc retryDelayFunc
}
func newRecoverer(params recovererParams) *recoverer {
@@ -41,7 +37,6 @@ func newRecoverer(params recovererParams) *recoverer {
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
queues: params.queues,
interval: params.interval,
retryDelayFunc: params.retryDelayFunc,
}
@@ -67,7 +62,7 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
case <-timer.C:
// Get all tasks whose deadline passed 30 seconds ago or earlier.
deadline := time.Now().Add(-30 * time.Second)
msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...)
msgs, err := r.broker.ListDeadlineExceeded(deadline)
if err != nil {
r.logger.Warn("recoverer: could not list deadline exceeded tasks")
continue
@@ -75,7 +70,7 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
const errMsg = "deadline exceeded" // TODO: better error message
for _, msg := range msgs {
if msg.Retried >= msg.Retry {
r.archive(msg, errMsg)
r.kill(msg, errMsg)
} else {
r.retry(msg, errMsg)
}
@@ -94,8 +89,8 @@ func (r *recoverer) retry(msg *base.TaskMessage, errMsg string) {
}
}
func (r *recoverer) archive(msg *base.TaskMessage, errMsg string) {
if err := r.broker.Archive(msg, errMsg); err != nil {
r.logger.Warnf("recoverer: could not move task to archive: %v", err)
func (r *recoverer) kill(msg *base.TaskMessage, errMsg string) {
if err := r.broker.Kill(msg, errMsg); err != nil {
r.logger.Warnf("recoverer: could not move task to dead queue: %v", err)
}
}


@@ -17,13 +17,12 @@ import (
func TestRecoverer(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
t1 := h.NewTaskMessageWithQueue("task1", nil, "default")
t2 := h.NewTaskMessageWithQueue("task2", nil, "default")
t1 := h.NewTaskMessage("task1", nil)
t2 := h.NewTaskMessage("task2", nil)
t3 := h.NewTaskMessageWithQueue("task3", nil, "critical")
t4 := h.NewTaskMessageWithQueue("task4", nil, "default")
t4 := h.NewTaskMessage("task4", nil)
t4.Retried = t4.Retry // t4 has reached its max retry count
now := time.Now()
@@ -34,204 +33,106 @@ func TestRecoverer(t *testing.T) {
tests := []struct {
desc string
inProgress map[string][]*base.TaskMessage
deadlines map[string][]base.Z
retry map[string][]base.Z
archived map[string][]base.Z
wantActive map[string][]*base.TaskMessage
wantDeadlines map[string][]base.Z
wantRetry map[string][]*base.TaskMessage
wantArchived map[string][]*base.TaskMessage
inProgress []*base.TaskMessage
deadlines []h.ZSetEntry
retry []h.ZSetEntry
dead []h.ZSetEntry
wantInProgress []*base.TaskMessage
wantDeadlines []h.ZSetEntry
wantRetry []*base.TaskMessage
wantDead []*base.TaskMessage
}{
{
desc: "with one active task",
inProgress: map[string][]*base.TaskMessage{
"default": {t1},
desc: "with one task in-progress",
inProgress: []*base.TaskMessage{t1},
deadlines: []h.ZSetEntry{
{Msg: t1, Score: float64(fiveMinutesAgo.Unix())},
},
deadlines: map[string][]base.Z{
"default": {{Message: t1, Score: fiveMinutesAgo.Unix()}},
},
retry: map[string][]base.Z{
"default": {},
},
archived: map[string][]base.Z{
"default": {},
},
wantActive: map[string][]*base.TaskMessage{
"default": {},
},
wantDeadlines: map[string][]base.Z{
"default": {},
},
wantRetry: map[string][]*base.TaskMessage{
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")},
},
wantArchived: map[string][]*base.TaskMessage{
"default": {},
retry: []h.ZSetEntry{},
dead: []h.ZSetEntry{},
wantInProgress: []*base.TaskMessage{},
wantDeadlines: []h.ZSetEntry{},
wantRetry: []*base.TaskMessage{
h.TaskMessageAfterRetry(*t1, "deadline exceeded"),
},
wantDead: []*base.TaskMessage{},
},
{
desc: "with a task with max-retry reached",
inProgress: map[string][]*base.TaskMessage{
"default": {t4},
"critical": {},
},
deadlines: map[string][]base.Z{
"default": {{Message: t4, Score: fiveMinutesAgo.Unix()}},
"critical": {},
},
retry: map[string][]base.Z{
"default": {},
"critical": {},
},
archived: map[string][]base.Z{
"default": {},
"critical": {},
},
wantActive: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
},
wantDeadlines: map[string][]base.Z{
"default": {},
"critical": {},
},
wantRetry: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
},
wantArchived: map[string][]*base.TaskMessage{
"default": {h.TaskMessageWithError(*t4, "deadline exceeded")},
"critical": {},
inProgress: []*base.TaskMessage{t4},
deadlines: []h.ZSetEntry{
{Msg: t4, Score: float64(fiveMinutesAgo.Unix())},
},
retry: []h.ZSetEntry{},
dead: []h.ZSetEntry{},
wantInProgress: []*base.TaskMessage{},
wantDeadlines: []h.ZSetEntry{},
wantRetry: []*base.TaskMessage{},
wantDead: []*base.TaskMessage{h.TaskMessageWithError(*t4, "deadline exceeded")},
},
{
desc: "with multiple active tasks, and one expired",
inProgress: map[string][]*base.TaskMessage{
"default": {t1, t2},
"critical": {t3},
desc: "with multiple tasks in-progress, and one expired",
inProgress: []*base.TaskMessage{t1, t2, t3},
deadlines: []h.ZSetEntry{
{Msg: t1, Score: float64(oneHourAgo.Unix())},
{Msg: t2, Score: float64(fiveMinutesFromNow.Unix())},
{Msg: t3, Score: float64(oneHourFromNow.Unix())},
},
deadlines: map[string][]base.Z{
"default": {
{Message: t1, Score: oneHourAgo.Unix()},
{Message: t2, Score: fiveMinutesFromNow.Unix()},
retry: []h.ZSetEntry{},
dead: []h.ZSetEntry{},
wantInProgress: []*base.TaskMessage{t2, t3},
wantDeadlines: []h.ZSetEntry{
{Msg: t2, Score: float64(fiveMinutesFromNow.Unix())},
{Msg: t3, Score: float64(oneHourFromNow.Unix())},
},
"critical": {
{Message: t3, Score: oneHourFromNow.Unix()},
},
},
retry: map[string][]base.Z{
"default": {},
"critical": {},
},
archived: map[string][]base.Z{
"default": {},
"critical": {},
},
wantActive: map[string][]*base.TaskMessage{
"default": {t2},
"critical": {t3},
},
wantDeadlines: map[string][]base.Z{
"default": {{Message: t2, Score: fiveMinutesFromNow.Unix()}},
"critical": {{Message: t3, Score: oneHourFromNow.Unix()}},
},
wantRetry: map[string][]*base.TaskMessage{
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")},
"critical": {},
},
wantArchived: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
wantRetry: []*base.TaskMessage{
h.TaskMessageAfterRetry(*t1, "deadline exceeded"),
},
wantDead: []*base.TaskMessage{},
},
{
desc: "with multiple expired active tasks",
inProgress: map[string][]*base.TaskMessage{
"default": {t1, t2},
"critical": {t3},
desc: "with multiple expired tasks in-progress",
inProgress: []*base.TaskMessage{t1, t2, t3},
deadlines: []h.ZSetEntry{
{Msg: t1, Score: float64(oneHourAgo.Unix())},
{Msg: t2, Score: float64(fiveMinutesAgo.Unix())},
{Msg: t3, Score: float64(oneHourFromNow.Unix())},
},
deadlines: map[string][]base.Z{
"default": {
{Message: t1, Score: oneHourAgo.Unix()},
{Message: t2, Score: oneHourFromNow.Unix()},
retry: []h.ZSetEntry{},
dead: []h.ZSetEntry{},
wantInProgress: []*base.TaskMessage{t3},
wantDeadlines: []h.ZSetEntry{
{Msg: t3, Score: float64(oneHourFromNow.Unix())},
},
"critical": {
{Message: t3, Score: fiveMinutesAgo.Unix()},
},
},
retry: map[string][]base.Z{
"default": {},
"cricial": {},
},
archived: map[string][]base.Z{
"default": {},
"cricial": {},
},
wantActive: map[string][]*base.TaskMessage{
"default": {t2},
"critical": {},
},
wantDeadlines: map[string][]base.Z{
"default": {{Message: t2, Score: oneHourFromNow.Unix()}},
},
wantRetry: map[string][]*base.TaskMessage{
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")},
"critical": {h.TaskMessageAfterRetry(*t3, "deadline exceeded")},
},
wantArchived: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
wantRetry: []*base.TaskMessage{
h.TaskMessageAfterRetry(*t1, "deadline exceeded"),
h.TaskMessageAfterRetry(*t2, "deadline exceeded"),
},
wantDead: []*base.TaskMessage{},
},
{
desc: "with empty active queue",
inProgress: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
},
deadlines: map[string][]base.Z{
"default": {},
"critical": {},
},
retry: map[string][]base.Z{
"default": {},
"critical": {},
},
archived: map[string][]base.Z{
"default": {},
"critical": {},
},
wantActive: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
},
wantDeadlines: map[string][]base.Z{
"default": {},
"critical": {},
},
wantRetry: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
},
wantArchived: map[string][]*base.TaskMessage{
"default": {},
"critical": {},
},
desc: "with empty in-progress queue",
inProgress: []*base.TaskMessage{},
deadlines: []h.ZSetEntry{},
retry: []h.ZSetEntry{},
dead: []h.ZSetEntry{},
wantInProgress: []*base.TaskMessage{},
wantDeadlines: []h.ZSetEntry{},
wantRetry: []*base.TaskMessage{},
wantDead: []*base.TaskMessage{},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllActiveQueues(t, r, tc.inProgress)
h.SeedAllDeadlines(t, r, tc.deadlines)
h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived)
h.SeedInProgressQueue(t, r, tc.inProgress)
h.SeedDeadlines(t, r, tc.deadlines)
h.SeedRetryQueue(t, r, tc.retry)
h.SeedDeadQueue(t, r, tc.dead)
recoverer := newRecoverer(recovererParams{
logger: testLogger,
broker: rdbClient,
queues: []string{"default", "critical"},
interval: 1 * time.Second,
retryDelayFunc: func(n int, err error, task *Task) time.Duration { return 30 * time.Second },
})
@@ -241,29 +142,21 @@ func TestRecoverer(t *testing.T) {
time.Sleep(2 * time.Second)
recoverer.terminate()
for qname, want := range tc.wantActive {
gotActive := h.GetActiveMessages(t, r, qname)
if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff)
gotInProgress := h.GetInProgressMessages(t, r)
if diff := cmp.Diff(tc.wantInProgress, gotInProgress, h.SortMsgOpt); diff != "" {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.InProgressQueue, diff)
}
gotDeadlines := h.GetDeadlinesEntries(t, r)
if diff := cmp.Diff(tc.wantDeadlines, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.KeyDeadlines, diff)
}
for qname, want := range tc.wantDeadlines {
gotDeadlines := h.GetDeadlinesEntries(t, r, qname)
if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.DeadlinesKey(qname), diff)
}
}
for qname, want := range tc.wantRetry {
gotRetry := h.GetRetryMessages(t, r, qname)
if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff)
}
}
for qname, want := range tc.wantArchived {
gotDead := h.GetArchivedMessages(t, r, qname)
if diff := cmp.Diff(want, gotDead, h.SortMsgOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff)
gotRetry := h.GetRetryMessages(t, r)
if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortMsgOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryQueue, diff)
}
gotDead := h.GetDeadMessages(t, r)
if diff := cmp.Diff(tc.wantDead, gotDead, h.SortMsgOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.DeadQueue, diff)
}
}
}


@@ -5,250 +5,64 @@
package asynq
import (
"fmt"
"os"
"sync"
"time"
"github.com/go-redis/redis/v7"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb"
"github.com/robfig/cron/v3"
)
// A Scheduler kicks off tasks at regular intervals based on the user defined schedule.
type Scheduler struct {
id string
status *base.ServerStatus
type scheduler struct {
logger *log.Logger
client *Client
rdb *rdb.RDB
cron *cron.Cron
location *time.Location
broker base.Broker
// channel to communicate back to the long running "scheduler" goroutine.
done chan struct{}
wg sync.WaitGroup
errHandler func(task *Task, opts []Option, err error)
// poll interval on average
avgInterval time.Duration
}
// NewScheduler returns a new Scheduler instance given the redis connection option.
// The parameter opts is optional, defaults will be used if opts is set to nil
func NewScheduler(r RedisConnOpt, opts *SchedulerOpts) *Scheduler {
c, ok := r.MakeRedisClient().(redis.UniversalClient)
if !ok {
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
}
if opts == nil {
opts = &SchedulerOpts{}
}
logger := log.NewLogger(opts.Logger)
loglevel := opts.LogLevel
if loglevel == level_unspecified {
loglevel = InfoLevel
}
logger.SetLevel(toInternalLogLevel(loglevel))
loc := opts.Location
if loc == nil {
loc = time.UTC
}
return &Scheduler{
id: generateSchedulerID(),
status: base.NewServerStatus(base.StatusIdle),
logger: logger,
client: NewClient(r),
rdb: rdb.NewRDB(c),
cron: cron.New(cron.WithLocation(loc)),
location: loc,
done: make(chan struct{}),
errHandler: opts.EnqueueErrorHandler,
}
}
func generateSchedulerID() string {
host, err := os.Hostname()
if err != nil {
host = "unknown-host"
}
return fmt.Sprintf("%s:%d:%v", host, os.Getpid(), uuid.New())
}
// SchedulerOpts specifies scheduler options.
type SchedulerOpts struct {
// Logger specifies the logger used by the scheduler instance.
//
// If unset, the default logger is used.
Logger Logger
// LogLevel specifies the minimum log level to enable.
//
// If unset, InfoLevel is used by default.
LogLevel LogLevel
// Location specifies the time zone location.
//
// If unset, the UTC time zone (time.UTC) is used.
Location *time.Location
// EnqueueErrorHandler gets called when scheduler cannot enqueue a registered task
// due to an error.
EnqueueErrorHandler func(task *Task, opts []Option, err error)
}
// enqueueJob encapsulates the job of enqueing a task and recording the event.
type enqueueJob struct {
id uuid.UUID
cronspec string
task *Task
opts []Option
location *time.Location
type schedulerParams struct {
logger *log.Logger
client *Client
rdb *rdb.RDB
errHandler func(task *Task, opts []Option, err error)
broker base.Broker
interval time.Duration
}
func (j *enqueueJob) Run() {
res, err := j.client.Enqueue(j.task, j.opts...)
if err != nil {
j.logger.Errorf("scheduler could not enqueue a task %+v: %v", j.task, err)
if j.errHandler != nil {
j.errHandler(j.task, j.opts, err)
}
return
}
j.logger.Debugf("scheduler enqueued a task: %+v", res)
event := &base.SchedulerEnqueueEvent{
TaskID: res.ID,
EnqueuedAt: res.EnqueuedAt.In(j.location),
}
err = j.rdb.RecordSchedulerEnqueueEvent(j.id.String(), event)
if err != nil {
j.logger.Errorf("scheduler could not record enqueue event of enqueued task %+v: %v", j.task, err)
func newScheduler(params schedulerParams) *scheduler {
return &scheduler{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
avgInterval: params.interval,
}
}
// Register registers a task to be enqueued on the given schedule specified by the cronspec.
// It returns an ID of the newly registered entry.
func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entryID string, err error) {
job := &enqueueJob{
id: uuid.New(),
cronspec: cronspec,
task: task,
opts: opts,
location: s.location,
client: s.client,
rdb: s.rdb,
logger: s.logger,
errHandler: s.errHandler,
}
if _, err = s.cron.AddJob(cronspec, job); err != nil {
return "", err
}
return job.id.String(), nil
func (s *scheduler) terminate() {
s.logger.Debug("Scheduler shutting down...")
// Signal the scheduler goroutine to stop polling.
s.done <- struct{}{}
}
// Run starts the scheduler until an os signal to exit the program is received.
// It returns an error if scheduler is already running or has been stopped.
func (s *Scheduler) Run() error {
if err := s.Start(); err != nil {
return err
}
s.waitForSignals()
return s.Stop()
}
// Start starts the scheduler.
// It returns an error if the scheduler is already running or has been stopped.
func (s *Scheduler) Start() error {
switch s.status.Get() {
case base.StatusRunning:
return fmt.Errorf("asynq: the scheduler is already running")
case base.StatusStopped:
return fmt.Errorf("asynq: the scheduler has already been stopped")
}
s.logger.Info("Scheduler starting")
s.logger.Infof("Scheduler timezone is set to %v", s.location)
s.cron.Start()
s.wg.Add(1)
go s.runHeartbeater()
s.status.Set(base.StatusRunning)
return nil
}
// Stop stops the scheduler.
// It returns an error if the scheduler is not currently running.
func (s *Scheduler) Stop() error {
if s.status.Get() != base.StatusRunning {
return fmt.Errorf("asynq: the scheduler is not running")
}
s.logger.Info("Scheduler shutting down")
close(s.done) // signal heartbeater to stop
ctx := s.cron.Stop()
<-ctx.Done()
s.wg.Wait()
s.clearHistory()
s.client.Close()
s.rdb.Close()
s.status.Set(base.StatusStopped)
s.logger.Info("Scheduler stopped")
return nil
}
func (s *Scheduler) runHeartbeater() {
defer s.wg.Done()
ticker := time.NewTicker(5 * time.Second)
// start starts the "scheduler" goroutine.
func (s *scheduler) start(wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-s.done:
s.logger.Debugf("Scheduler heatbeater shutting down")
s.rdb.ClearSchedulerEntries(s.id)
s.logger.Debug("Scheduler done")
return
case <-ticker.C:
s.beat()
case <-time.After(s.avgInterval):
s.exec()
}
}
}()
}
// beat writes a snapshot of entries to redis.
func (s *Scheduler) beat() {
var entries []*base.SchedulerEntry
for _, entry := range s.cron.Entries() {
job := entry.Job.(*enqueueJob)
e := &base.SchedulerEntry{
ID: job.id.String(),
Spec: job.cronspec,
Type: job.task.Type,
Payload: job.task.Payload.data,
Opts: stringifyOptions(job.opts),
Next: entry.Next,
Prev: entry.Prev,
}
entries = append(entries, e)
}
s.logger.Debugf("Writing entries %v", entries)
if err := s.rdb.WriteSchedulerEntries(s.id, entries, 5*time.Second); err != nil {
s.logger.Warnf("Scheduler could not write heartbeat data: %v", err)
}
}
func stringifyOptions(opts []Option) []string {
var res []string
for _, opt := range opts {
res = append(res, opt.String())
}
return res
}
func (s *Scheduler) clearHistory() {
for _, entry := range s.cron.Entries() {
job := entry.Job.(*enqueueJob)
if err := s.rdb.ClearSchedulerHistory(job.id.String()); err != nil {
s.logger.Warnf("Could not clear scheduler history for entry %q: %v", job.id.String(), err)
}
func (s *scheduler) exec() {
if err := s.broker.CheckAndEnqueue(); err != nil {
s.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
}
}


@@ -10,109 +10,88 @@ import (
"time"
"github.com/google/go-cmp/cmp"
"github.com/hibiken/asynq/internal/asynqtest"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
)
func TestScheduler(t *testing.T) {
r := setup(t)
rdbClient := rdb.NewRDB(r)
const pollInterval = time.Second
s := newScheduler(schedulerParams{
logger: testLogger,
broker: rdbClient,
interval: pollInterval,
})
t1 := h.NewTaskMessage("gen_thumbnail", nil)
t2 := h.NewTaskMessage("send_email", nil)
t3 := h.NewTaskMessage("reindex", nil)
t4 := h.NewTaskMessage("sync", nil)
now := time.Now()
tests := []struct {
cronspec string
task *Task
opts []Option
wait time.Duration
queue string
want []*base.TaskMessage
initScheduled []h.ZSetEntry // scheduled queue initial state
initRetry []h.ZSetEntry // retry queue initial state
initQueue []*base.TaskMessage // default queue initial state
wait time.Duration // wait duration before checking for final state
wantScheduled []*base.TaskMessage // schedule queue final state
wantRetry []*base.TaskMessage // retry queue final state
wantQueue []*base.TaskMessage // default queue final state
}{
{
cronspec: "@every 3s",
task: NewTask("task1", nil),
opts: []Option{MaxRetry(10)},
wait: 10 * time.Second,
queue: "default",
want: []*base.TaskMessage{
{
Type: "task1",
Payload: nil,
Retry: 10,
Timeout: int64(defaultTimeout.Seconds()),
Queue: "default",
initScheduled: []h.ZSetEntry{
{Msg: t1, Score: float64(now.Add(time.Hour).Unix())},
{Msg: t2, Score: float64(now.Add(-2 * time.Second).Unix())},
},
initRetry: []h.ZSetEntry{
{Msg: t3, Score: float64(time.Now().Add(-500 * time.Millisecond).Unix())},
},
initQueue: []*base.TaskMessage{t4},
wait: pollInterval * 2,
wantScheduled: []*base.TaskMessage{t1},
wantRetry: []*base.TaskMessage{},
wantQueue: []*base.TaskMessage{t2, t3, t4},
},
{
Type: "task1",
Payload: nil,
Retry: 10,
Timeout: int64(defaultTimeout.Seconds()),
Queue: "default",
},
{
Type: "task1",
Payload: nil,
Retry: 10,
Timeout: int64(defaultTimeout.Seconds()),
Queue: "default",
},
initScheduled: []h.ZSetEntry{
{Msg: t1, Score: float64(now.Unix())},
{Msg: t2, Score: float64(now.Add(-2 * time.Second).Unix())},
{Msg: t3, Score: float64(now.Add(-500 * time.Millisecond).Unix())},
},
initRetry: []h.ZSetEntry{},
initQueue: []*base.TaskMessage{t4},
wait: pollInterval * 2,
wantScheduled: []*base.TaskMessage{},
wantRetry: []*base.TaskMessage{},
wantQueue: []*base.TaskMessage{t1, t2, t3, t4},
},
}
r := setup(t)
for _, tc := range tests {
scheduler := NewScheduler(getRedisConnOpt(t), nil)
if _, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...); err != nil {
t.Fatal(err)
}
h.FlushDB(t, r) // clean up db before each test case.
h.SeedScheduledQueue(t, r, tc.initScheduled) // initialize scheduled queue
h.SeedRetryQueue(t, r, tc.initRetry) // initialize retry queue
h.SeedEnqueuedQueue(t, r, tc.initQueue) // initialize default queue
if err := scheduler.Start(); err != nil {
t.Fatal(err)
}
var wg sync.WaitGroup
s.start(&wg)
time.Sleep(tc.wait)
if err := scheduler.Stop(); err != nil {
t.Fatal(err)
s.terminate()
gotScheduled := h.GetScheduledMessages(t, r)
if diff := cmp.Diff(tc.wantScheduled, gotScheduled, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.ScheduledQueue, diff)
}
got := asynqtest.GetPendingMessages(t, r, tc.queue)
if diff := cmp.Diff(tc.want, got, asynqtest.IgnoreIDOpt); diff != "" {
t.Errorf("mismatch found in queue %q: (-want,+got)\n%s", tc.queue, diff)
}
}
gotRetry := h.GetRetryMessages(t, r)
if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.RetryQueue, diff)
}
func TestSchedulerWhenRedisDown(t *testing.T) {
var (
mu sync.Mutex
counter int
)
errorHandler := func(task *Task, opts []Option, err error) {
mu.Lock()
counter++
mu.Unlock()
gotEnqueued := h.GetEnqueuedMessages(t, r)
if diff := cmp.Diff(tc.wantQueue, gotEnqueued, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.DefaultQueue, diff)
}
// Connect to non-existent redis instance to simulate a redis server being down.
scheduler := NewScheduler(
RedisClientOpt{Addr: ":9876"},
&SchedulerOpts{EnqueueErrorHandler: errorHandler},
)
task := NewTask("test", nil)
if _, err := scheduler.Register("@every 3s", task); err != nil {
t.Fatal(err)
}
if err := scheduler.Start(); err != nil {
t.Fatal(err)
}
// Scheduler should attempt to enqueue the task three times (every 3s).
time.Sleep(10 * time.Second)
if err := scheduler.Stop(); err != nil {
t.Fatal(err)
}
mu.Lock()
if counter != 3 {
t.Errorf("EnqueueErrorHandler was called %d times, want 3", counter)
}
mu.Unlock()
}

server.go

@@ -15,23 +15,23 @@ import (
"sync"
"time"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb"
)
// Server is responsible for managing the task processing.
// Server is responsible for managing the background-task processing.
//
// Server pulls tasks off queues and processes them.
// If the processing of a task is unsuccessful, server will schedule it for a retry.
// If the processing of a task is unsuccessful, server will
// schedule it for a retry.
// A task will be retried until either the task gets processed successfully
// or until it reaches its max retry count.
//
// If a task exhausts its retries, it will be moved to the archive and
// will be kept in the archive for some time until a certain condition is met
// (e.g., archive size reaches a certain limit, or the task has been in the
// archive for a certain amount of time).
// If a task exhausts its retries, it will be moved to the "dead" queue and
// will be kept in the queue for some time until a certain condition is met
// (e.g., queue size reaches a certain limit, or the task has been in the
// queue for a certain amount of time).
type Server struct {
logger *log.Logger
@@ -41,13 +41,12 @@ type Server struct {
// wait group to wait for all goroutines to finish.
wg sync.WaitGroup
forwarder *forwarder
scheduler *scheduler
processor *processor
syncer *syncer
heartbeater *heartbeater
subscriber *subscriber
recoverer *recoverer
healthchecker *healthchecker
}
// Config specifies the server's background-task processing behavior.
@@ -61,7 +60,11 @@ type Config struct {
// Function to calculate retry delay for a failed task.
//
// By default, it uses exponential backoff algorithm to calculate the delay.
RetryDelayFunc RetryDelayFunc
//
// n is the number of times the task has been retried.
// e is the error returned by the task handler.
// t is the task in question.
RetryDelayFunc func(n int, e error, t *Task) time.Duration
// List of queues to process with given priority value. Keys are the names of the
// queues and values are associated priority value.
@@ -71,13 +74,11 @@ type Config struct {
// Priority is treated as follows to avoid starving low priority queues.
//
// Example:
//
// Queues: map[string]int{
// "critical": 6,
// "default": 3,
// "low": 1,
// }
//
// With the above config and given that all queues are not empty, the tasks
// in "critical", "default", "low" should be processed 60%, 30%, 10% of
// the time respectively.
@@ -97,10 +98,7 @@ type Config struct {
// HandleError is invoked only if the task handler returns a non-nil error.
//
// Example:
//
// func reportError(ctx context, task *asynq.Task, err error) {
// retried, _ := asynq.GetRetryCount(ctx)
// maxRetry, _ := asynq.GetMaxRetry(ctx)
// func reportError(task *asynq.Task, err error, retried, maxRetry int) {
// if retried >= maxRetry {
// err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
// }
@@ -125,18 +123,9 @@ type Config struct {
//
// If unset or zero, default timeout of 8 seconds is used.
ShutdownTimeout time.Duration
// HealthCheckFunc is called periodically with any errors encountered during ping to the
// connected redis server.
HealthCheckFunc func(error)
// HealthCheckInterval specifies the interval between healthchecks.
//
// If unset or zero, the interval is set to 15 seconds.
HealthCheckInterval time.Duration
}
// An ErrorHandler handles an error occurred during task processing.
// An ErrorHandler handles errors returned by the task handler.
type ErrorHandler interface {
HandleError(ctx context.Context, task *Task, err error)
}
@@ -150,14 +139,6 @@ func (fn ErrorHandlerFunc) HandleError(ctx context.Context, task *Task, err erro
fn(ctx, task, err)
}
// RetryDelayFunc calculates the retry delay duration for a failed task given
// the retry count, error, and the task.
//
// n is the number of times the task has been retried.
// e is the error returned by the task handler.
// t is the task in question.
type RetryDelayFunc func(n int, e error, t *Task) time.Duration
// Logger supports logging at various log levels.
type Logger interface {
// Debug logs a message at Debug level.
@@ -258,11 +239,9 @@ func toInternalLogLevel(l LogLevel) log.Level {
panic(fmt.Sprintf("asynq: unexpected log level: %v", l))
}
// DefaultRetryDelayFunc is the default RetryDelayFunc used if one is not specified in Config.
// It uses exponential back-off strategy to calculate the retry delay.
func DefaultRetryDelayFunc(n int, e error, t *Task) time.Duration {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
// Formula taken from https://github.com/mperham/sidekiq.
func defaultDelayFunc(n int, e error, t *Task) time.Duration {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
return time.Duration(s) * time.Second
}
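// Illustrative arithmetic (an added example, not part of the source above):
// with n = 3 prior retries, s = 3^4 + 15 + Intn(30)*(3+1), so the computed
// delay falls somewhere between 96 and 212 seconds.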
@@ -271,26 +250,18 @@ var defaultQueueConfig = map[string]int{
base.DefaultQueueName: 1,
}
const (
defaultShutdownTimeout = 8 * time.Second
defaultHealthCheckInterval = 15 * time.Second
)
const defaultShutdownTimeout = 8 * time.Second
// NewServer returns a new Server given a redis connection option
// and background processing configuration.
func NewServer(r RedisConnOpt, cfg Config) *Server {
c, ok := r.MakeRedisClient().(redis.UniversalClient)
if !ok {
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
}
n := cfg.Concurrency
if n < 1 {
n = runtime.NumCPU()
}
delayFunc := cfg.RetryDelayFunc
if delayFunc == nil {
delayFunc = DefaultRetryDelayFunc
delayFunc = defaultDelayFunc
}
queues := make(map[string]int)
for qname, p := range cfg.Queues {
@@ -301,18 +272,10 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
if len(queues) == 0 {
queues = defaultQueueConfig
}
var qnames []string
for q := range queues {
qnames = append(qnames, q)
}
shutdownTimeout := cfg.ShutdownTimeout
if shutdownTimeout == 0 {
shutdownTimeout = defaultShutdownTimeout
}
healthcheckInterval := cfg.HealthCheckInterval
if healthcheckInterval == 0 {
healthcheckInterval = defaultHealthCheckInterval
}
logger := log.NewLogger(cfg.Logger)
loglevel := cfg.LogLevel
if loglevel == level_unspecified {
@@ -320,8 +283,8 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
}
logger.SetLevel(toInternalLogLevel(loglevel))
rdb := rdb.NewRDB(c)
starting := make(chan *workerInfo)
rdb := rdb.NewRDB(createRedisClient(r))
starting := make(chan *base.TaskMessage)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
status := base.NewServerStatus(base.StatusIdle)
@@ -343,10 +306,9 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
starting: starting,
finished: finished,
})
forwarder := newForwarder(forwarderParams{
scheduler := newScheduler(schedulerParams{
logger: logger,
broker: rdb,
queues: qnames,
interval: 5 * time.Second,
})
subscriber := newSubscriber(subscriberParams{
@@ -372,26 +334,18 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
logger: logger,
broker: rdb,
retryDelayFunc: delayFunc,
queues: qnames,
interval: 1 * time.Minute,
})
healthchecker := newHealthChecker(healthcheckerParams{
logger: logger,
broker: rdb,
interval: healthcheckInterval,
healthcheckFunc: cfg.HealthCheckFunc,
})
return &Server{
logger: logger,
broker: rdb,
status: status,
forwarder: forwarder,
scheduler: scheduler,
processor: processor,
syncer: syncer,
heartbeater: heartbeater,
subscriber: subscriber,
recoverer: recoverer,
healthchecker: healthchecker,
}
}
@@ -402,9 +356,6 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
//
// If ProcessTask return a non-nil error or panics, the task
// will be retried after delay.
// One exception to this rule is when ProcessTask returns SkipRetry error.
// If the returned error is SkipRetry or the error wraps SkipRetry, retry is
// skipped and task will be archived instead.
type Handler interface {
ProcessTask(context.Context, *Task) error
}
@@ -462,11 +413,10 @@ func (srv *Server) Start(handler Handler) error {
srv.logger.Info("Starting processing")
srv.heartbeater.start(&srv.wg)
srv.healthchecker.start(&srv.wg)
srv.subscriber.start(&srv.wg)
srv.syncer.start(&srv.wg)
srv.recoverer.start(&srv.wg)
srv.forwarder.start(&srv.wg)
srv.scheduler.start(&srv.wg)
srv.processor.start(&srv.wg)
return nil
}
@@ -487,12 +437,11 @@ func (srv *Server) Stop() {
// Sender goroutines should be terminated before the receiver goroutines.
// processor -> syncer (via syncCh)
// processor -> heartbeater (via starting, finished channels)
srv.forwarder.terminate()
srv.scheduler.terminate()
srv.processor.terminate()
srv.recoverer.terminate()
srv.syncer.terminate()
srv.subscriber.terminate()
srv.healthchecker.terminate()
srv.heartbeater.terminate()
srv.wg.Wait()


@@ -21,10 +21,12 @@ func TestServer(t *testing.T) {
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
defer goleak.VerifyNoLeaks(t, ignoreOpt)
redisConnOpt := getRedisConnOpt(t)
c := NewClient(redisConnOpt)
defer c.Close()
srv := NewServer(redisConnOpt, Config{
r := &RedisClientOpt{
Addr: "localhost:6379",
DB: 15,
}
c := NewClient(r)
srv := NewServer(r, Config{
Concurrency: 10,
LogLevel: testLogLevel,
})
@@ -44,7 +46,7 @@ func TestServer(t *testing.T) {
t.Errorf("could not enqueue a task: %v", err)
}
_, err = c.Enqueue(NewTask("send_email", map[string]interface{}{"recipient_id": 456}), ProcessIn(1*time.Hour))
_, err = c.EnqueueAt(time.Now().Add(time.Hour), NewTask("send_email", map[string]interface{}{"recipient_id": 456}))
if err != nil {
t.Errorf("could not enqueue a task: %v", err)
}
@@ -127,7 +129,7 @@ func TestServerWithRedisDown(t *testing.T) {
testBroker := testbroker.NewTestBroker(r)
srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
srv.broker = testBroker
srv.forwarder.broker = testBroker
srv.scheduler.broker = testBroker
srv.heartbeater.broker = testBroker
srv.processor.broker = testBroker
srv.subscriber.broker = testBroker
@@ -157,15 +159,14 @@ func TestServerWithFlakyBroker(t *testing.T) {
}()
r := rdb.NewRDB(setup(t))
testBroker := testbroker.NewTestBroker(r)
redisConnOpt := getRedisConnOpt(t)
srv := NewServer(redisConnOpt, Config{LogLevel: testLogLevel})
srv := NewServer(RedisClientOpt{Addr: redisAddr, DB: redisDB}, Config{LogLevel: testLogLevel})
srv.broker = testBroker
srv.forwarder.broker = testBroker
srv.scheduler.broker = testBroker
srv.heartbeater.broker = testBroker
srv.processor.broker = testBroker
srv.subscriber.broker = testBroker
c := NewClient(redisConnOpt)
c := NewClient(RedisClientOpt{Addr: redisAddr, DB: redisDB})
h := func(ctx context.Context, task *Task) error {
// force task retry.
@@ -190,7 +191,7 @@ func TestServerWithFlakyBroker(t *testing.T) {
if err != nil {
t.Fatal(err)
}
_, err = c.Enqueue(NewTask("scheduled", nil), ProcessIn(time.Duration(i)*time.Second))
_, err = c.EnqueueIn(time.Duration(i)*time.Second, NewTask("scheduled", nil))
if err != nil {
t.Fatal(err)
}


@@ -28,10 +28,3 @@ func (srv *Server) waitForSignals() {
break
}
}
func (s *Scheduler) waitForSignals() {
s.logger.Info("Send signal TERM or INT to stop the scheduler")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, unix.SIGTERM, unix.SIGINT)
<-sigs
}


@@ -20,10 +20,3 @@ func (srv *Server) waitForSignals() {
signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
<-sigs
}
func (s *Scheduler) waitForSignals() {
s.logger.Info("Send signal TERM or INT to stop the scheduler")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
<-sigs
}


@@ -20,7 +20,7 @@ type subscriber struct {
// channel to communicate back to the long running "subscriber" goroutine.
done chan struct{}
// cancelations hold cancel functions for all active tasks.
// cancelations hold cancel functions for all in-progress tasks.
cancelations *base.Cancelations
// time to wait before retrying to connect to redis.


@@ -16,7 +16,6 @@ import (
func TestSubscriber(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
tests := []struct {
@@ -77,7 +76,6 @@ func TestSubscriberWithRedisDown(t *testing.T) {
}
}()
r := rdb.NewRDB(setup(t))
defer r.Close()
testBroker := testbroker.NewTestBroker(r)
cancelations := base.NewCancelations()


@@ -22,9 +22,8 @@ func TestSyncer(t *testing.T) {
h.NewTaskMessage("gen_thumbnail", nil),
}
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
h.SeedActiveQueue(t, r, inProgress, base.DefaultQueueName)
h.SeedInProgressQueue(t, r, inProgress)
const interval = time.Second
syncRequestCh := make(chan *syncRequest)
@@ -49,9 +48,9 @@ func TestSyncer(t *testing.T) {
time.Sleep(2 * interval) // ensure that syncer runs at least once
gotActive := h.GetActiveMessages(t, r, base.DefaultQueueName)
if l := len(gotActive); l != 0 {
t.Errorf("%q has length %d; want 0", base.ActiveKey(base.DefaultQueueName), l)
gotInProgress := h.GetInProgressMessages(t, r)
if l := len(gotInProgress); l != 0 {
t.Errorf("%q has length %d; want 0", base.InProgressQueue, l)
}
}


@@ -1,11 +1,20 @@
# Asynq CLI
Asynq CLI is a command line tool to monitor the queues and tasks managed by `asynq` package.
Asynq CLI is a command line tool to monitor the tasks managed by `asynq` package.
## Table of Contents
- [Installation](#installation)
- [Usage](#usage)
- [Quick Start](#quick-start)
- [Stats](#stats)
- [History](#history)
- [Servers](#servers)
- [List](#list)
- [Enqueue](#enqueue)
- [Delete](#delete)
- [Kill](#kill)
- [Cancel](#cancel)
- [Pause](#pause)
- [Config File](#config-file)
## Installation
@@ -16,41 +25,144 @@ In order to use the tool, compile it using the following command:
This will create the asynq executable under your `$GOPATH/bin` directory.
## Usage
## Quick Start
### Commands
The tool has a few commands to inspect the state of tasks and queues.
To view details on any command, use `asynq help <command> <subcommand>`.
- `asynq stats`
- `asynq queue [ls inspect history rm pause unpause]`
- `asynq task [ls cancel delete archive run delete-all archive-all run-all]`
- `asynq server [ls]`
### Global flags
Run `asynq help` to see all the available commands.
Asynq CLI needs to connect to a redis-server to inspect the state of queues and tasks. Use flags to specify the options to connect to the redis-server used by your application.
To connect to a redis cluster, pass `--cluster` and `--cluster_addrs` flags.
By default, CLI will try to connect to a redis server running at `localhost:6379`.
```
--config string config file to set flag default values (default is $HOME/.asynq.yaml)
-n, --db int redis database number (default is 0)
-h, --help help for asynq
-p, --password string password to use when connecting to redis server
-u, --uri string redis server URI (default "127.0.0.1:6379")
### Stats
--cluster connect to redis cluster
--cluster_addrs string list of comma-separated redis server addresses
```
Stats command gives an overview of the current state of tasks and queues. You can run it in conjunction with the `watch` command to run `stats` repeatedly.
Example:
watch -n 3 asynq stats
This will run `asynq stats` command every 3 seconds.
![Gif](/docs/assets/asynq_stats.gif)
### History
History command shows the number of processed and failed tasks from the last x days.
By default, it shows the stats from the last 10 days. Use `--days` to specify the number of days.
Example:
asynq history --days=30
![Gif](/docs/assets/asynq_history.gif)
### Servers
Servers command shows the list of running worker servers pulling tasks from the given redis instance.
Example:
asynq servers
### List
List command shows all tasks in the specified state in a table format.
Example:
asynq ls retry
asynq ls scheduled
asynq ls dead
asynq ls enqueued:default
asynq ls inprogress
### Enqueue
There are two commands to enqueue tasks.
Command `enq` takes a task ID and moves the task to **Enqueued** state. You can obtain the task ID by running `ls` command.
Example:
asynq enq d:1575732274:bnogo8gt6toe23vhef0g
Command `enqall` moves all tasks to **Enqueued** state from the specified state.
Example:
asynq enqall retry
Running the above command will move all **Retry** tasks to **Enqueued** state.
### Delete
There are two commands for task deletion.
Command `del` takes a task ID and deletes the task. You can obtain the task ID by running `ls` command.
Example:
asynq del r:1575732274:bnogo8gt6toe23vhef0g
Command `delall` deletes all tasks which are in the specified state.
Example:
asynq delall retry
Running the above command will delete all **Retry** tasks.
### Kill
There are two commands to kill (i.e. move to dead state) tasks.
Command `kill` takes a task ID and kills the task. You can obtain the task ID by running `ls` command.
Example:
asynq kill r:1575732274:bnogo8gt6toe23vhef0g
Command `killall` kills all tasks which are in the specified state.
Example:
asynq killall retry
Running the above command will move all **Retry** tasks to **Dead** state.
### Cancel
Command `cancel` takes a task ID and sends a cancelation signal to the goroutine processing the specified task.
You can obtain the task ID by running `ls` command.
The task should be in "in-progress" state.
Handler implementation needs to be context aware in order to actually stop processing.
Example:
asynq cancel bnogo8gt6toe23vhef0g
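
To make a handler cancelable, have it check `ctx.Done()` between units of work. A minimal sketch, assuming the standard `func(context.Context, *asynq.Task) error` handler signature (`doSomeWork` is a hypothetical helper):

```go
package tasks

import (
	"context"

	"github.com/hibiken/asynq"
)

// doSomeWork stands in for one unit of the handler's real work.
func doSomeWork(i int) {
	// placeholder for one unit of real work (e.g. processing one item)
}

// HandleTask checks for cancelation between units of work and returns early
// when the signal arrives; the processor then retries the task later.
func HandleTask(ctx context.Context, t *asynq.Task) error {
	for i := 0; i < 10; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			doSomeWork(i)
		}
	}
	return nil
}
```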
### Pause
Command `pause` pauses the specified queue. Tasks in paused queues are not processed by servers.
To resume processing from the queue, use `unpause` command.
To see which queues are currently paused, use `stats` command.
Example:
asynq pause email
asynq unpause email
## Config File
You can use a config file to set default values for the flags.
This is useful, for example, when you have to connect to a remote redis server.
By default, `asynq` will try to read a config file located in
`$HOME/.asynq.(yml|json)`. You can specify the file location via `--config` flag.
`$HOME/.asynq.(yaml|json)`. You can specify the file location via `--config` flag.
Config file example:
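
A minimal sketch of such a file, assuming the flag names above double as config keys (values are illustrative):

```yaml
uri: 127.0.0.1:6379
db: 2
password: mypassword
```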

53
tools/asynq/cmd/cancel.go Normal file
View File

@@ -0,0 +1,53 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// cancelCmd represents the cancel command
var cancelCmd = &cobra.Command{
Use: "cancel [task id]",
Short: "Sends a cancelation signal to the goroutine processing the specified task",
Long: `Cancel (asynq cancel) will send a cancelation signal to the goroutine processing
the specified task.
The command takes one argument which specifies the task to cancel.
The task should be in in-progress state.
Identifier for a task should be obtained by running "asynq ls" command.
Handler implementation needs to be context aware for cancelation signal to
actually cancel the processing.
Example: asynq cancel bnogo8gt6toe23vhef0g`,
Args: cobra.ExactArgs(1),
Run: cancel,
}
func init() {
rootCmd.AddCommand(cancelCmd)
}
func cancel(cmd *cobra.Command, args []string) {
r := rdb.NewRDB(redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
}))
err := r.PublishCancelation(args[0])
if err != nil {
fmt.Printf("could not send cancelation signal: %v\n", err)
os.Exit(1)
}
fmt.Printf("Successfully sent cancelation siganl for task %s\n", args[0])
}

View File

@@ -1,129 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"io"
"os"
"sort"
"time"
"github.com/hibiken/asynq/inspeq"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(cronCmd)
cronCmd.AddCommand(cronListCmd)
cronCmd.AddCommand(cronHistoryCmd)
cronHistoryCmd.Flags().Int("page", 1, "page number")
cronHistoryCmd.Flags().Int("size", 30, "page size")
}
var cronCmd = &cobra.Command{
Use: "cron",
Short: "Manage cron",
}
var cronListCmd = &cobra.Command{
Use: "ls",
Short: "List cron entries",
Run: cronList,
}
var cronHistoryCmd = &cobra.Command{
Use: "history [ENTRY_ID...]",
Short: "Show history of each cron tasks",
Args: cobra.MinimumNArgs(1),
Run: cronHistory,
}
func cronList(cmd *cobra.Command, args []string) {
inspector := createInspector()
entries, err := inspector.SchedulerEntries()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(entries) == 0 {
fmt.Println("No scheduler entries")
return
}
// Sort entries by spec.
sort.Slice(entries, func(i, j int) bool {
x, y := entries[i], entries[j]
return x.Spec < y.Spec
})
cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
printRows := func(w io.Writer, tmpl string) {
for _, e := range entries {
fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type, e.Task.Payload, e.Opts,
nextEnqueue(e.Next), prevEnqueue(e.Prev))
}
}
printTable(cols, printRows)
}
// Returns a string describing when the next enqueue will happen.
func nextEnqueue(nextEnqueueAt time.Time) string {
d := nextEnqueueAt.Sub(time.Now()).Round(time.Second)
if d < 0 {
return "Now"
}
return fmt.Sprintf("In %v", d)
}
// Returns a string describing when the previous enqueue was.
func prevEnqueue(prevEnqueuedAt time.Time) string {
if prevEnqueuedAt.IsZero() {
return "N/A"
}
return fmt.Sprintf("%v ago", time.Since(prevEnqueuedAt).Round(time.Second))
}
func cronHistory(cmd *cobra.Command, args []string) {
pageNum, err := cmd.Flags().GetInt("page")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
pageSize, err := cmd.Flags().GetInt("size")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
inspector := createInspector()
for i, entryID := range args {
if i > 0 {
fmt.Printf("\n%s\n", separator)
}
fmt.Println()
fmt.Printf("Entry: %s\n\n", entryID)
events, err := inspector.ListSchedulerEnqueueEvents(
entryID, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
if err != nil {
fmt.Printf("error: %v\n", err)
continue
}
if len(events) == 0 {
fmt.Printf("No scheduler enqueue events found for entry: %s\n", entryID)
continue
}
cols := []string{"TaskID", "EnqueuedAt"}
printRows := func(w io.Writer, tmpl string) {
for _, e := range events {
fmt.Fprintf(w, tmpl, e.TaskID, e.EnqueuedAt)
}
}
printTable(cols, printRows)
}
}

73
tools/asynq/cmd/del.go Normal file
View File

@@ -0,0 +1,73 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// delCmd represents the del command
var delCmd = &cobra.Command{
Use: "del [task id]",
Short: "Deletes a task given an identifier",
Long: `Del (asynq del) will delete a task given an identifier.
The command takes one argument which specifies the task to delete.
The task should be in either scheduled, retry or dead state.
Identifier for a task should be obtained by running "asynq ls" command.
Example: asynq del d:1575732274:bnogo8gt6toe23vhef0g`,
Args: cobra.ExactArgs(1),
Run: del,
}
func init() {
rootCmd.AddCommand(delCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// delCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// delCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
func del(cmd *cobra.Command, args []string) {
id, score, qtype, err := parseQueryID(args[0])
if err != nil {
fmt.Println(err)
os.Exit(1)
}
r := rdb.NewRDB(redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
}))
switch qtype {
case "s":
err = r.DeleteScheduledTask(id, score)
case "r":
err = r.DeleteRetryTask(id, score)
case "d":
err = r.DeleteDeadTask(id, score)
default:
fmt.Println("invalid argument")
os.Exit(1)
}
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Successfully deleted %v\n", args[0])
}

71
tools/asynq/cmd/delall.go Normal file
View File

@@ -0,0 +1,71 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var delallValidArgs = []string{"scheduled", "retry", "dead"}
// delallCmd represents the delall command
var delallCmd = &cobra.Command{
Use: "delall [state]",
Short: "Deletes all tasks in the specified state",
Long: `Delall (asynq delall) will delete all tasks in the specified state.
The argument should be one of "scheduled", "retry", or "dead".
Example: asynq delall dead -> Deletes all dead tasks`,
ValidArgs: delallValidArgs,
Args: cobra.ExactValidArgs(1),
Run: delall,
}
func init() {
rootCmd.AddCommand(delallCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// delallCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// delallCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
func delall(cmd *cobra.Command, args []string) {
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := rdb.NewRDB(c)
var err error
switch args[0] {
case "scheduled":
err = r.DeleteAllScheduledTasks()
case "retry":
err = r.DeleteAllRetryTasks()
case "dead":
err = r.DeleteAllDeadTasks()
default:
fmt.Printf("error: `asynq delall [state]` only accepts %v as the argument.\n", delallValidArgs)
os.Exit(1)
}
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Deleted all tasks in %q state\n", args[0])
}

76
tools/asynq/cmd/enq.go Normal file
View File

@@ -0,0 +1,76 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// enqCmd represents the enq command
var enqCmd = &cobra.Command{
Use: "enq [task id]",
Short: "Enqueues a task given an identifier",
Long: `Enq (asynq enq) will enqueue a task given an identifier.
The command takes one argument which specifies the task to enqueue.
The task should be in either scheduled, retry or dead state.
Identifier for a task should be obtained by running "asynq ls" command.
The task enqueued by this command will be processed as soon as the task
gets dequeued by a processor.
Example: asynq enq d:1575732274:bnogo8gt6toe23vhef0g`,
Args: cobra.ExactArgs(1),
Run: enq,
}
func init() {
rootCmd.AddCommand(enqCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// enqCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// enqCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
func enq(cmd *cobra.Command, args []string) {
id, score, qtype, err := parseQueryID(args[0])
if err != nil {
fmt.Println(err)
os.Exit(1)
}
r := rdb.NewRDB(redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
}))
switch qtype {
case "s":
err = r.EnqueueScheduledTask(id, score)
case "r":
err = r.EnqueueRetryTask(id, score)
case "d":
err = r.EnqueueDeadTask(id, score)
default:
fmt.Println("invalid argument")
os.Exit(1)
}
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Successfully enqueued %v\n", args[0])
}

75
tools/asynq/cmd/enqall.go Normal file
View File

@@ -0,0 +1,75 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var enqallValidArgs = []string{"scheduled", "retry", "dead"}
// enqallCmd represents the enqall command
var enqallCmd = &cobra.Command{
Use: "enqall [state]",
Short: "Enqueues all tasks in the specified state",
Long: `Enqall (asynq enqall) will enqueue all tasks in the specified state.
The argument should be one of "scheduled", "retry", or "dead".
The tasks enqueued by this command will be processed as soon as they
get dequeued by a processor.
Example: asynq enqall dead -> Enqueues all dead tasks`,
ValidArgs: enqallValidArgs,
Args: cobra.ExactValidArgs(1),
Run: enqall,
}
func init() {
rootCmd.AddCommand(enqallCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// enqallCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// enqallCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
func enqall(cmd *cobra.Command, args []string) {
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := rdb.NewRDB(c)
var n int64
var err error
switch args[0] {
case "scheduled":
n, err = r.EnqueueAllScheduledTasks()
case "retry":
n, err = r.EnqueueAllRetryTasks()
case "dead":
n, err = r.EnqueueAllDeadTasks()
default:
fmt.Printf("error: `asynq enqall [state]` only accepts %v as the argument.\n", enqallValidArgs)
os.Exit(1)
}
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Enqueued %d tasks in %q state\n", n, args[0])
}

View File

@@ -0,0 +1,71 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"strings"
"text/tabwriter"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var days int
// historyCmd represents the history command
var historyCmd = &cobra.Command{
Use: "history",
Short: "Shows historical aggregate data",
Long: `History (asynq history) will show the number of processed and failed tasks
from the last x days.
By default, it will show the data from the last 10 days.
Example: asynq history -x=30 -> Shows stats from the last 30 days`,
Args: cobra.NoArgs,
Run: history,
}
func init() {
rootCmd.AddCommand(historyCmd)
historyCmd.Flags().IntVarP(&days, "days", "x", 10, "show data from last x days")
}
func history(cmd *cobra.Command, args []string) {
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := rdb.NewRDB(c)
stats, err := r.HistoricalStats(days)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
printDailyStats(stats)
}
func printDailyStats(stats []*rdb.DailyStats) {
format := strings.Repeat("%v\t", 4) + "\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
fmt.Fprintf(tw, format, "Date (UTC)", "Processed", "Failed", "Error Rate")
fmt.Fprintf(tw, format, "----------", "---------", "------", "----------")
for _, s := range stats {
var errrate string
if s.Processed == 0 {
errrate = "N/A"
} else {
errrate = fmt.Sprintf("%.2f%%", float64(s.Failed)/float64(s.Processed)*100)
}
fmt.Fprintf(tw, format, s.Time.Format("2006-01-02"), s.Processed, s.Failed, errrate)
}
tw.Flush()
}

72
tools/asynq/cmd/kill.go Normal file
View File

@@ -0,0 +1,72 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// killCmd represents the kill command
var killCmd = &cobra.Command{
Use: "kill [task id]",
Short: "Kills a task given an identifier",
Long: `Kill (asynq kill) will put a task in dead state given an identifier.
The command takes one argument which specifies the task to kill.
The task should be in either scheduled or retry state.
Identifier for a task should be obtained by running "asynq ls" command.
Example: asynq kill r:1575732274:bnogo8gt6toe23vhef0g`,
Args: cobra.ExactArgs(1),
Run: kill,
}
func init() {
rootCmd.AddCommand(killCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// killCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// killCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
func kill(cmd *cobra.Command, args []string) {
id, score, qtype, err := parseQueryID(args[0])
if err != nil {
fmt.Println(err)
os.Exit(1)
}
r := rdb.NewRDB(redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
}))
switch qtype {
case "s":
err = r.KillScheduledTask(id, score)
case "r":
err = r.KillRetryTask(id, score)
default:
fmt.Println("invalid argument")
os.Exit(1)
}
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Successfully killed %v\n", args[0])
}

View File

@@ -0,0 +1,70 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var killallValidArgs = []string{"scheduled", "retry"}
// killallCmd represents the killall command
var killallCmd = &cobra.Command{
Use: "killall [state]",
Short: "Kills all tasks in the specified state",
Long: `Killall (asynq killall) will update all tasks from the specified state to dead state.
The argument should be either "scheduled" or "retry".
Example: asynq killall retry -> Updates all retry tasks to dead state`,
ValidArgs: killallValidArgs,
Args: cobra.ExactValidArgs(1),
Run: killall,
}
func init() {
rootCmd.AddCommand(killallCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// killallCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// killallCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
func killall(cmd *cobra.Command, args []string) {
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := rdb.NewRDB(c)
var n int64
var err error
switch args[0] {
case "scheduled":
n, err = r.KillAllScheduledTasks()
case "retry":
n, err = r.KillAllRetryTasks()
default:
fmt.Printf("error: `asynq killall [state]` only accepts %v as the argument.\n", killallValidArgs)
os.Exit(1)
}
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Successfully updated %d tasks to \"dead\" state\n", n)
}

229
tools/asynq/cmd/ls.go Normal file
View File

@@ -0,0 +1,229 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"time"
"github.com/go-redis/redis/v7"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var lsValidArgs = []string{"enqueued", "inprogress", "scheduled", "retry", "dead"}
// lsCmd represents the ls command
var lsCmd = &cobra.Command{
Use: "ls [state]",
Short: "Lists tasks in the specified state",
Long: `Ls (asynq ls) will list all tasks in the specified state in a table format.
The command takes one argument which specifies the state of tasks.
The argument value should be one of "enqueued", "inprogress", "scheduled",
"retry", or "dead".
Example:
asynq ls dead -> Lists all tasks in dead state
Enqueued tasks require a queue name after ":"
Example:
asynq ls enqueued:default -> List tasks from default queue
asynq ls enqueued:critical -> List tasks from critical queue
`,
Args: cobra.ExactValidArgs(1),
Run: ls,
}
// Flags
var pageSize int
var pageNum int
func init() {
rootCmd.AddCommand(lsCmd)
lsCmd.Flags().IntVar(&pageSize, "size", 30, "page size")
lsCmd.Flags().IntVar(&pageNum, "page", 0, "page number - zero indexed (default 0)")
}
func ls(cmd *cobra.Command, args []string) {
if pageSize < 0 {
fmt.Println("page size cannot be negative.")
os.Exit(1)
}
if pageNum < 0 {
fmt.Println("page number cannot be negative.")
os.Exit(1)
}
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := rdb.NewRDB(c)
parts := strings.Split(args[0], ":")
switch parts[0] {
case "enqueued":
if len(parts) != 2 {
fmt.Printf("error: Missing queue name\n`asynq ls enqueued:[queue name]`\n")
os.Exit(1)
}
listEnqueued(r, parts[1])
case "inprogress":
listInProgress(r)
case "scheduled":
listScheduled(r)
case "retry":
listRetry(r)
case "dead":
listDead(r)
default:
fmt.Printf("error: `asynq ls [state]`\nonly accepts %v as the argument.\n", lsValidArgs)
os.Exit(1)
}
}
// queryID returns an identifier used for the "enq" command.
// score is the zset score and qtype should be one
// of "s", "r" or "d" (scheduled, retry, dead respectively).
func queryID(id uuid.UUID, score int64, qtype string) string {
const format = "%v:%v:%v"
return fmt.Sprintf(format, qtype, score, id)
}
// parseQueryID is the reverse operation of the queryID function.
// It takes a queryID and returns each part of the id with the proper
// type if valid; otherwise it reports an error.
func parseQueryID(queryID string) (id uuid.UUID, score int64, qtype string, err error) {
parts := strings.Split(queryID, ":")
if len(parts) != 3 {
return uuid.Nil, 0, "", fmt.Errorf("invalid id")
}
id, err = uuid.Parse(parts[2])
if err != nil {
return uuid.Nil, 0, "", fmt.Errorf("invalid id")
}
score, err = strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return uuid.Nil, 0, "", fmt.Errorf("invalid id")
}
qtype = parts[0]
if len(qtype) != 1 || !strings.Contains("srd", qtype) {
return uuid.Nil, 0, "", fmt.Errorf("invalid id")
}
return id, score, qtype, nil
}
func listEnqueued(r *rdb.RDB, qname string) {
tasks, err := r.ListEnqueued(qname, rdb.Pagination{Size: pageSize, Page: pageNum})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Printf("No enqueued tasks in %q queue\n", qname)
return
}
cols := []string{"ID", "Type", "Payload", "Queue"}
printRows := func(w io.Writer, tmpl string) {
for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload, t.Queue)
}
}
printTable(cols, printRows)
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
}
func listInProgress(r *rdb.RDB) {
tasks, err := r.ListInProgress(rdb.Pagination{Size: pageSize, Page: pageNum})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Println("No in-progress tasks")
return
}
cols := []string{"ID", "Type", "Payload"}
printRows := func(w io.Writer, tmpl string) {
for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload)
}
}
printTable(cols, printRows)
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
}
func listScheduled(r *rdb.RDB) {
tasks, err := r.ListScheduled(rdb.Pagination{Size: pageSize, Page: pageNum})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Println("No scheduled tasks")
return
}
cols := []string{"ID", "Type", "Payload", "Process In", "Queue"}
printRows := func(w io.Writer, tmpl string) {
for _, t := range tasks {
processIn := fmt.Sprintf("%.0f seconds", t.ProcessAt.Sub(time.Now()).Seconds())
fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "s"), t.Type, t.Payload, processIn, t.Queue)
}
}
printTable(cols, printRows)
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
}
func listRetry(r *rdb.RDB) {
tasks, err := r.ListRetry(rdb.Pagination{Size: pageSize, Page: pageNum})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Println("No retry tasks")
return
}
cols := []string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Retried", "Max Retry", "Queue"}
printRows := func(w io.Writer, tmpl string) {
for _, t := range tasks {
var nextRetry string
if d := t.ProcessAt.Sub(time.Now()); d > 0 {
nextRetry = fmt.Sprintf("in %v", d.Round(time.Second))
} else {
nextRetry = "right now"
}
fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "r"), t.Type, t.Payload, nextRetry, t.ErrorMsg, t.Retried, t.Retry, t.Queue)
}
}
printTable(cols, printRows)
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
}
func listDead(r *rdb.RDB) {
tasks, err := r.ListDead(rdb.Pagination{Size: pageSize, Page: pageNum})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Println("No dead tasks")
return
}
cols := []string{"ID", "Type", "Payload", "Last Failed", "Last Error", "Queue"}
printRows := func(w io.Writer, tmpl string) {
for _, t := range tasks {
fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "d"), t.Type, t.Payload, t.LastFailedAt, t.ErrorMsg, t.Queue)
}
}
printTable(cols, printRows)
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
}

View File

@@ -19,10 +19,11 @@ import (
"github.com/spf13/viper"
)
// migrateCmd represents the migrate command
var migrateCmd = &cobra.Command{
Use: "migrate",
Short: fmt.Sprintf("Migrate all tasks to be compatible with asynq v%s", base.Version),
Args: cobra.NoArgs,
Short: fmt.Sprintf("Migrate all tasks to be compatible with asynq@%s", base.Version),
Long: fmt.Sprintf("Migrate (asynq migrate) will convert all tasks in redis to be compatible with asynq@%s.", base.Version),
Run: migrate,
}
@@ -36,206 +37,28 @@ func migrate(cmd *cobra.Command, args []string) {
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := createRDB()
/*** Migrate from 0.9 to 0.10, 0.11 compatible ***/
lists := []string{"asynq:in_progress"}
lists := []string{base.InProgressQueue}
allQueues, err := c.SMembers(base.AllQueues).Result()
if err != nil {
printError(fmt.Errorf("could not read all queues: %v", err))
fmt.Printf("error: could not read all queues: %v", err)
os.Exit(1)
}
lists = append(lists, allQueues...)
for _, key := range lists {
if err := migrateList(c, key); err != nil {
printError(err)
fmt.Printf("error: %v", err)
os.Exit(1)
}
}
zsets := []string{"asynq:scheduled", "asynq:retry", "asynq:dead"}
zsets := []string{base.ScheduledQueue, base.RetryQueue, base.DeadQueue}
for _, key := range zsets {
if err := migrateZSet(c, key); err != nil {
printError(err)
fmt.Printf("error: %v", err)
os.Exit(1)
}
}
/*** Migrate from 0.11 to 0.12 compatible ***/
if err := createBackup(c, base.AllQueues); err != nil {
printError(err)
os.Exit(1)
}
for _, qkey := range allQueues {
qname := strings.TrimPrefix(qkey, "asynq:queues:")
if err := c.SAdd(base.AllQueues, qname).Err(); err != nil {
err = fmt.Errorf("could not add queue name %q to %q set: %v\n",
qname, base.AllQueues, err)
printError(err)
os.Exit(1)
}
}
if err := deleteBackup(c, base.AllQueues); err != nil {
printError(err)
os.Exit(1)
}
for _, qkey := range allQueues {
qname := strings.TrimPrefix(qkey, "asynq:queues:")
if exists := c.Exists(qkey).Val(); exists == 1 {
if err := c.Rename(qkey, base.QueueKey(qname)).Err(); err != nil {
printError(fmt.Errorf("could not rename key %q: %v\n", qkey, err))
os.Exit(1)
}
}
}
if err := partitionZSetMembersByQueue(c, "asynq:scheduled", base.ScheduledKey); err != nil {
printError(err)
os.Exit(1)
}
if err := partitionZSetMembersByQueue(c, "asynq:retry", base.RetryKey); err != nil {
printError(err)
os.Exit(1)
}
// Note: base.DeadKey function was renamed in v0.14. We define the legacy function here since we need it for this migration script.
deadKeyFunc := func(qname string) string { return fmt.Sprintf("asynq:{%s}:dead", qname) }
if err := partitionZSetMembersByQueue(c, "asynq:dead", deadKeyFunc); err != nil {
printError(err)
os.Exit(1)
}
if err := partitionZSetMembersByQueue(c, "asynq:deadlines", base.DeadlinesKey); err != nil {
printError(err)
os.Exit(1)
}
if err := partitionListMembersByQueue(c, "asynq:in_progress", base.ActiveKey); err != nil {
printError(err)
os.Exit(1)
}
paused, err := c.SMembers("asynq:paused").Result()
if err != nil {
printError(fmt.Errorf("command SMEMBERS asynq:paused failed: %v", err))
os.Exit(1)
}
for _, qkey := range paused {
qname := strings.TrimPrefix(qkey, "asynq:queues:")
if err := r.Pause(qname); err != nil {
printError(err)
os.Exit(1)
}
}
if err := deleteKey(c, "asynq:paused"); err != nil {
printError(err)
os.Exit(1)
}
if err := deleteKey(c, "asynq:servers"); err != nil {
printError(err)
os.Exit(1)
}
if err := deleteKey(c, "asynq:workers"); err != nil {
printError(err)
os.Exit(1)
}
/*** Migrate from 0.13 to 0.14 compatible ***/
// Move all dead tasks to archived ZSET.
for _, qname := range allQueues {
zs, err := c.ZRangeWithScores(deadKeyFunc(qname), 0, -1).Result()
if err != nil {
printError(err)
os.Exit(1)
}
for _, z := range zs {
if err := c.ZAdd(base.ArchivedKey(qname), &z).Err(); err != nil {
printError(err)
os.Exit(1)
}
}
if err := deleteKey(c, deadKeyFunc(qname)); err != nil {
printError(err)
os.Exit(1)
}
}
}
func backupKey(key string) string {
return fmt.Sprintf("%s:backup", key)
}
func createBackup(c *redis.Client, key string) error {
err := c.Rename(key, backupKey(key)).Err()
if err != nil {
return fmt.Errorf("could not rename key %q: %v", key, err)
}
return nil
}
func deleteBackup(c *redis.Client, key string) error {
return deleteKey(c, backupKey(key))
}
func deleteKey(c *redis.Client, key string) error {
exists := c.Exists(key).Val()
if exists == 0 {
// key does not exist
return nil
}
err := c.Del(key).Err()
if err != nil {
return fmt.Errorf("could not delete key %q: %v", key, err)
}
return nil
}
func printError(err error) {
fmt.Println(err)
fmt.Println()
fmt.Println("Migrate command error")
fmt.Println("Please file an issue on Github at https://github.com/hibiken/asynq/issues/new/choose")
}
func partitionZSetMembersByQueue(c *redis.Client, key string, newKeyFunc func(string) string) error {
zs, err := c.ZRangeWithScores(key, 0, -1).Result()
if err != nil {
return fmt.Errorf("command ZRANGE %s 0 -1 WITHSCORES failed: %v", key, err)
}
for _, z := range zs {
s := cast.ToString(z.Member)
msg, err := base.DecodeMessage(s)
if err != nil {
return fmt.Errorf("could not decode message from %q: %v", key, err)
}
if err := c.ZAdd(newKeyFunc(msg.Queue), &z).Err(); err != nil {
return fmt.Errorf("could not add %v to %q: %v", z, newKeyFunc(msg.Queue))
}
}
if err := deleteKey(c, key); err != nil {
return err
}
return nil
}
func partitionListMembersByQueue(c *redis.Client, key string, newKeyFunc func(string) string) error {
data, err := c.LRange(key, 0, -1).Result()
if err != nil {
return fmt.Errorf("command LRANGE %s 0 -1 failed: %v", key, err)
}
for _, s := range data {
msg, err := base.DecodeMessage(s)
if err != nil {
return fmt.Errorf("could not decode message from %q: %v", key, err)
}
if err := c.LPush(newKeyFunc(msg.Queue), s).Err(); err != nil {
return fmt.Errorf("could not add %v to %q: %v", s, newKeyFunc(msg.Queue))
}
}
if err := deleteKey(c, key); err != nil {
return err
}
return nil
}
type oldTaskMessage struct {

47
tools/asynq/cmd/pause.go Normal file
View File

@@ -0,0 +1,47 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// pauseCmd represents the pause command
var pauseCmd = &cobra.Command{
Use: "pause [queue name]",
Short: "Pauses the specified queue",
Long: `Pause (asynq pause) will pause the specified queue.
Asynq servers will not process tasks from paused queues.
Use the "unpause" command to resume a paused queue.
Example: asynq pause default -> Pause the "default" queue`,
Args: cobra.ExactValidArgs(1),
Run: pause,
}
func init() {
rootCmd.AddCommand(pauseCmd)
}
func pause(cmd *cobra.Command, args []string) {
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := rdb.NewRDB(c)
err := r.Pause(args[0])
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
fmt.Printf("Successfully paused queue %q\n", args[0])
}

View File

@@ -1,256 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"io"
"os"
"github.com/fatih/color"
"github.com/hibiken/asynq/inspeq"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
)
const separator = "================================================="
func init() {
rootCmd.AddCommand(queueCmd)
queueCmd.AddCommand(queueListCmd)
queueCmd.AddCommand(queueInspectCmd)
queueCmd.AddCommand(queueHistoryCmd)
queueHistoryCmd.Flags().IntP("days", "x", 10, "show data from last x days")
queueCmd.AddCommand(queuePauseCmd)
queueCmd.AddCommand(queueUnpauseCmd)
queueCmd.AddCommand(queueRemoveCmd)
queueRemoveCmd.Flags().BoolP("force", "f", false, "remove the queue regardless of its size")
}
var queueCmd = &cobra.Command{
Use: "queue",
Short: "Manage queues",
}
var queueListCmd = &cobra.Command{
Use: "ls",
Short: "List queues",
// TODO: Use RunE instead?
Run: queueList,
}
var queueInspectCmd = &cobra.Command{
Use: "inspect QUEUE [QUEUE...]",
Short: "Display detailed information on one or more queues",
Args: cobra.MinimumNArgs(1),
// TODO: Use RunE instead?
Run: queueInspect,
}
var queueHistoryCmd = &cobra.Command{
Use: "history QUEUE [QUEUE...]",
Short: "Display historical aggregate data from one or more queues",
Args: cobra.MinimumNArgs(1),
Run: queueHistory,
}
var queuePauseCmd = &cobra.Command{
Use: "pause QUEUE [QUEUE...]",
Short: "Pause one or more queues",
Args: cobra.MinimumNArgs(1),
Run: queuePause,
}
var queueUnpauseCmd = &cobra.Command{
Use: "unpause QUEUE [QUEUE...]",
Short: "Unpause one or more queues",
Args: cobra.MinimumNArgs(1),
Run: queueUnpause,
}
var queueRemoveCmd = &cobra.Command{
Use: "rm QUEUE [QUEUE...]",
Short: "Remove one or more queues",
Args: cobra.MinimumNArgs(1),
Run: queueRemove,
}
func queueList(cmd *cobra.Command, args []string) {
type queueInfo struct {
name string
keyslot int64
nodes []inspeq.ClusterNode
}
inspector := createInspector()
queues, err := inspector.Queues()
if err != nil {
fmt.Printf("error: Could not fetch list of queues: %v\n", err)
os.Exit(1)
}
var qs []queueInfo
for _, qname := range queues {
q := queueInfo{name: qname}
if useRedisCluster {
keyslot, err := inspector.ClusterKeySlot(qname)
if err != nil {
fmt.Errorf("error: Could not get cluster keyslot for %q\n", qname)
continue
}
q.keyslot = keyslot
nodes, err := inspector.ClusterNodes(qname)
if err != nil {
fmt.Errorf("error: Could not get cluster nodes for %q\n", qname)
continue
}
q.nodes = nodes
}
qs = append(qs, q)
}
if useRedisCluster {
printTable(
[]string{"Queue", "Cluster KeySlot", "Cluster Nodes"},
func(w io.Writer, tmpl string) {
for _, q := range qs {
fmt.Fprintf(w, tmpl, q.name, q.keyslot, q.nodes)
}
},
)
} else {
for _, q := range qs {
fmt.Println(q.name)
}
}
}
func queueInspect(cmd *cobra.Command, args []string) {
inspector := createInspector()
for i, qname := range args {
if i > 0 {
fmt.Printf("\n%s\n", separator)
}
fmt.Println()
stats, err := inspector.CurrentStats(qname)
if err != nil {
fmt.Printf("error: %v\n", err)
continue
}
printQueueStats(stats)
}
}
func printQueueStats(s *inspeq.QueueStats) {
bold := color.New(color.Bold)
bold.Println("Queue Info")
fmt.Printf("Name: %s\n", s.Queue)
fmt.Printf("Size: %d\n", s.Size)
fmt.Printf("Paused: %t\n\n", s.Paused)
bold.Println("Task Count by State")
printTable(
[]string{"active", "pending", "scheduled", "retry", "archived"},
func(w io.Writer, tmpl string) {
fmt.Fprintf(w, tmpl, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived)
},
)
fmt.Println()
bold.Printf("Daily Stats %s UTC\n", s.Timestamp.UTC().Format("2006-01-02"))
printTable(
[]string{"processed", "failed", "error rate"},
func(w io.Writer, tmpl string) {
var errRate string
if s.Processed == 0 {
errRate = "N/A"
} else {
errRate = fmt.Sprintf("%.2f%%", float64(s.Failed)/float64(s.Processed)*100)
}
fmt.Fprintf(w, tmpl, s.Processed, s.Failed, errRate)
},
)
}
func queueHistory(cmd *cobra.Command, args []string) {
days, err := cmd.Flags().GetInt("days")
if err != nil {
fmt.Printf("error: Internal error: %v\n", err)
os.Exit(1)
}
inspector := createInspector()
for i, qname := range args {
if i > 0 {
fmt.Printf("\n%s\n", separator)
}
fmt.Printf("\nQueue: %s\n\n", qname)
stats, err := inspector.History(qname, days)
if err != nil {
fmt.Printf("error: %v\n", err)
continue
}
printDailyStats(stats)
}
}
func printDailyStats(stats []*inspeq.DailyStats) {
printTable(
[]string{"date (UTC)", "processed", "failed", "error rate"},
func(w io.Writer, tmpl string) {
for _, s := range stats {
var errRate string
if s.Processed == 0 {
errRate = "N/A"
} else {
errRate = fmt.Sprintf("%.2f%%", float64(s.Failed)/float64(s.Processed)*100)
}
fmt.Fprintf(w, tmpl, s.Date.Format("2006-01-02"), s.Processed, s.Failed, errRate)
}
},
)
}
func queuePause(cmd *cobra.Command, args []string) {
inspector := createInspector()
for _, qname := range args {
err := inspector.PauseQueue(qname)
if err != nil {
fmt.Println(err)
continue
}
fmt.Printf("Successfully paused queue %q\n", qname)
}
}
func queueUnpause(cmd *cobra.Command, args []string) {
inspector := createInspector()
for _, qname := range args {
err := inspector.UnpauseQueue(qname)
if err != nil {
fmt.Println(err)
continue
}
fmt.Printf("Successfully unpaused queue %q\n", qname)
}
}
func queueRemove(cmd *cobra.Command, args []string) {
// TODO: Use inspector once RemoveQueue becomes a public API.
force, err := cmd.Flags().GetBool("force")
if err != nil {
fmt.Printf("error: Internal error: %v\n", err)
os.Exit(1)
}
r := createRDB()
for _, qname := range args {
err = r.RemoveQueue(qname, force)
if err != nil {
if _, ok := err.(*rdb.ErrQueueNotEmpty); ok {
fmt.Printf("error: %v\nIf you are sure you want to delete it, run 'asynq queue rm --force %s'\n", err, qname)
continue
}
fmt.Printf("error: %v\n", err)
continue
}
fmt.Printf("Successfully removed queue %q\n", qname)
}
}

54
tools/asynq/cmd/rmq.go Normal file
View File

@@ -0,0 +1,54 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// rmqCmd represents the rmq command
var rmqCmd = &cobra.Command{
Use: "rmq [queue name]",
Short: "Removes the specified queue",
Long: `Rmq (asynq rmq) will remove the specified queue.
By default, it will remove the queue only if it's empty.
Use --force option to override this behavior.
Example: asynq rmq low -> Removes "low" queue`,
Args: cobra.ExactValidArgs(1),
Run: rmq,
}
var rmqForce bool
func init() {
rootCmd.AddCommand(rmqCmd)
rmqCmd.Flags().BoolVarP(&rmqForce, "force", "f", false, "remove the queue regardless of its size")
}
func rmq(cmd *cobra.Command, args []string) {
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := rdb.NewRDB(c)
err := r.RemoveQueue(args[0], rmqForce)
if err != nil {
if _, ok := err.(*rdb.ErrQueueNotEmpty); ok {
fmt.Printf("error: %v\nIf you are sure you want to delete it, run 'asynq rmq --force %s'\n", err, args[0])
os.Exit(1)
}
fmt.Printf("error: %v", err)
os.Exit(1)
}
fmt.Printf("Successfully removed queue %q\n", args[0])
}

View File

@@ -5,18 +5,13 @@
package cmd
import (
"crypto/tls"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/inspeq"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
homedir "github.com/mitchellh/go-homedir"
@@ -25,16 +20,10 @@ import (
var cfgFile string
// Global flag variables
var (
uri string
db int
password string
useRedisCluster bool
clusterAddrs string
tlsServerName string
)
// Flags
var uri string
var db int
var password string
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
@@ -73,19 +62,9 @@ func init() {
rootCmd.PersistentFlags().StringVarP(&uri, "uri", "u", "127.0.0.1:6379", "redis server URI")
rootCmd.PersistentFlags().IntVarP(&db, "db", "n", 0, "redis database number (default is 0)")
rootCmd.PersistentFlags().StringVarP(&password, "password", "p", "", "password to use when connecting to redis server")
rootCmd.PersistentFlags().BoolVar(&useRedisCluster, "cluster", false, "connect to redis cluster")
rootCmd.PersistentFlags().StringVar(&clusterAddrs, "cluster_addrs",
"127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003,127.0.0.1:7004,127.0.0.1:7005",
"list of comma-separated redis server addresses")
rootCmd.PersistentFlags().StringVar(&tlsServerName, "tls_server",
"", "server name for TLS validation")
// Bind flags with config.
viper.BindPFlag("uri", rootCmd.PersistentFlags().Lookup("uri"))
viper.BindPFlag("db", rootCmd.PersistentFlags().Lookup("db"))
viper.BindPFlag("password", rootCmd.PersistentFlags().Lookup("password"))
viper.BindPFlag("cluster", rootCmd.PersistentFlags().Lookup("cluster"))
viper.BindPFlag("cluster_addrs", rootCmd.PersistentFlags().Lookup("cluster_addrs"))
viper.BindPFlag("tls_server", rootCmd.PersistentFlags().Lookup("tls_server"))
}
// initConfig reads in config file and ENV variables if set.
@@ -114,56 +93,6 @@ func initConfig() {
}
}
// createRDB creates a RDB instance using flag values and returns it.
func createRDB() *rdb.RDB {
var c redis.UniversalClient
if useRedisCluster {
addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
c = redis.NewClusterClient(&redis.ClusterOptions{
Addrs: addrs,
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
})
} else {
c = redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
})
}
return rdb.NewRDB(c)
}
// createInspector creates an Inspector instance using flag values and returns it.
func createInspector() *inspeq.Inspector {
var connOpt asynq.RedisConnOpt
if useRedisCluster {
addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
connOpt = asynq.RedisClusterClientOpt{
Addrs: addrs,
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
}
} else {
connOpt = asynq.RedisClientOpt{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
}
}
return inspeq.New(connOpt)
}
func getTLSConfig() *tls.Config {
tlsServer := viper.GetString("tls_server")
if tlsServer == "" {
return nil
}
return &tls.Config{ServerName: tlsServer}
}
// printTable is a helper function to print data in table format.
//
// cols is a list of headers and printRow specifies how to print rows.

View File

@@ -12,24 +12,18 @@ import (
"strings"
"time"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
func init() {
rootCmd.AddCommand(serverCmd)
serverCmd.AddCommand(serverListCmd)
}
var serverCmd = &cobra.Command{
Use: "server",
Short: "Manage servers",
}
var serverListCmd = &cobra.Command{
Use: "ls",
Short: "List servers",
Long: `Server list (asynq server ls) shows all running worker servers
pulling tasks from the given redis instance.
// serversCmd represents the servers command
var serversCmd = &cobra.Command{
Use: "servers",
Short: "Shows all running worker servers",
Long: `Servers (asynq servers) will show all running worker servers
pulling tasks from the specified redis instance.
The command shows the following for each server:
* Host and PID of the process in which the server is running
@@ -40,11 +34,20 @@ The command shows the following for each server:
A "running" server is pulling tasks from queues and processing them.
A "quiet" server is no longer pulling new tasks from queues`,
Run: serverList,
Args: cobra.NoArgs,
Run: servers,
}
func serverList(cmd *cobra.Command, args []string) {
r := createRDB()
func init() {
rootCmd.AddCommand(serversCmd)
}
func servers(cmd *cobra.Command, args []string) {
r := rdb.NewRDB(redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
}))
servers, err := r.ListServers()
if err != nil {
@@ -78,6 +81,12 @@ func serverList(cmd *cobra.Command, args []string) {
printTable(cols, printRows)
}
// timeAgo takes a time and returns a string of the format "<duration> ago".
func timeAgo(since time.Time) string {
d := time.Since(since).Round(time.Second)
return fmt.Sprintf("%v ago", d)
}
func formatQueues(qmap map[string]int) string {
// sort queues by priority and name
type queue struct {
@@ -107,9 +116,3 @@ func formatQueues(qmap map[string]int) string {
}
return b.String()
}
// timeAgo takes a time and returns a string of the format "<duration> ago".
func timeAgo(since time.Time) string {
d := time.Since(since).Round(time.Second)
return fmt.Sprintf("%v ago", d)
}

View File

@@ -6,16 +6,15 @@ package cmd
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/fatih/color"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// statsCmd represents the stats command
@@ -52,93 +51,57 @@ func init() {
// statsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
type AggregateStats struct {
Active int
Pending int
Scheduled int
Retry int
Archived int
Processed int
Failed int
Timestamp time.Time
}
func stats(cmd *cobra.Command, args []string) {
r := createRDB()
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := rdb.NewRDB(c)
queues, err := r.AllQueues()
stats, err := r.CurrentStats()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
var aggStats AggregateStats
var stats []*rdb.Stats
for _, qname := range queues {
s, err := r.CurrentStats(qname)
info, err := r.RedisInfo()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
aggStats.Active += s.Active
aggStats.Pending += s.Pending
aggStats.Scheduled += s.Scheduled
aggStats.Retry += s.Retry
aggStats.Archived += s.Archived
aggStats.Processed += s.Processed
aggStats.Failed += s.Failed
aggStats.Timestamp = s.Timestamp
stats = append(stats, s)
}
var info map[string]string
if useRedisCluster {
info, err = r.RedisClusterInfo()
} else {
info, err = r.RedisInfo()
}
if err != nil {
fmt.Println(err)
os.Exit(1)
}
bold := color.New(color.Bold)
bold.Println("Task Count by State")
printStatsByState(&aggStats)
fmt.Println("STATES")
printStates(stats)
fmt.Println()
bold.Println("Task Count by Queue")
printStatsByQueue(stats)
fmt.Println("QUEUES")
printQueues(stats.Queues)
fmt.Println()
bold.Printf("Daily Stats %s UTC\n", aggStats.Timestamp.UTC().Format("2006-01-02"))
printSuccessFailureStats(&aggStats)
fmt.Printf("STATS FOR %s UTC\n", stats.Timestamp.UTC().Format("2006-01-02"))
printStats(stats)
fmt.Println()
if useRedisCluster {
bold.Println("Redis Cluster Info")
printClusterInfo(info)
} else {
bold.Println("Redis Info")
fmt.Println("REDIS INFO")
printInfo(info)
}
fmt.Println()
}
func printStatsByState(s *AggregateStats) {
func printStates(s *rdb.Stats) {
format := strings.Repeat("%v\t", 5) + "\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived")
fmt.Fprintf(tw, format, "InProgress", "Enqueued", "Scheduled", "Retry", "Dead")
fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----")
fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived)
fmt.Fprintf(tw, format, s.InProgress, s.Enqueued, s.Scheduled, s.Retry, s.Dead)
tw.Flush()
}
func printStatsByQueue(stats []*rdb.Stats) {
func printQueues(queues []*rdb.Queue) {
var headers, seps, counts []string
for _, s := range stats {
title := queueTitle(s)
for _, q := range queues {
title := queueTitle(q)
headers = append(headers, title)
seps = append(seps, strings.Repeat("-", len(title)))
counts = append(counts, strconv.Itoa(s.Size))
counts = append(counts, strconv.Itoa(q.Size))
}
format := strings.Repeat("%v\t", len(headers)) + "\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
@@ -148,19 +111,19 @@ func printStatsByQueue(stats []*rdb.Stats) {
tw.Flush()
}
func queueTitle(s *rdb.Stats) string {
func queueTitle(q *rdb.Queue) string {
var b strings.Builder
b.WriteString(s.Queue)
if s.Paused {
b.WriteString(" (paused)")
b.WriteString(strings.Title(q.Name))
if q.Paused {
b.WriteString(" (Paused)")
}
return b.String()
}
func printSuccessFailureStats(s *AggregateStats) {
func printStats(s *rdb.Stats) {
format := strings.Repeat("%v\t", 3) + "\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
fmt.Fprintf(tw, format, "processed", "failed", "error rate")
fmt.Fprintf(tw, format, "Processed", "Failed", "Error Rate")
fmt.Fprintf(tw, format, "---------", "------", "----------")
var errrate string
if s.Processed == 0 {
@@ -175,7 +138,7 @@ func printSuccessFailureStats(s *AggregateStats) {
func printInfo(info map[string]string) {
format := strings.Repeat("%v\t", 5) + "\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
fmt.Fprintf(tw, format, "version", "uptime", "connections", "memory usage", "peak memory usage")
fmt.Fprintf(tw, format, "Version", "Uptime", "Connections", "Memory Usage", "Peak Memory Usage")
fmt.Fprintf(tw, format, "-------", "------", "-----------", "------------", "-----------------")
fmt.Fprintf(tw, format,
info["redis_version"],
@@ -187,19 +150,6 @@ func printInfo(info map[string]string) {
tw.Flush()
}
func printClusterInfo(info map[string]string) {
printTable(
[]string{"State", "Known Nodes", "Cluster Size"},
func(w io.Writer, tmpl string) {
fmt.Fprintf(w, tmpl,
strings.ToUpper(info["cluster_state"]),
info["cluster_known_nodes"],
info["cluster_size"],
)
},
)
}
func toInterfaceSlice(strs []string) []interface{} {
var res []interface{}
for _, s := range strs {

View File

@@ -1,467 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"io"
"os"
"time"
"github.com/hibiken/asynq/inspeq"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(taskCmd)
taskCmd.AddCommand(taskListCmd)
taskListCmd.Flags().StringP("queue", "q", "", "queue to inspect")
taskListCmd.Flags().StringP("state", "s", "", "state of the tasks to inspect")
taskListCmd.Flags().Int("page", 1, "page number")
taskListCmd.Flags().Int("size", 30, "page size")
taskListCmd.MarkFlagRequired("queue")
taskListCmd.MarkFlagRequired("state")
taskCmd.AddCommand(taskCancelCmd)
taskCmd.AddCommand(taskArchiveCmd)
taskArchiveCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
taskArchiveCmd.Flags().StringP("key", "k", "", "key of the task")
taskArchiveCmd.MarkFlagRequired("queue")
taskArchiveCmd.MarkFlagRequired("key")
taskCmd.AddCommand(taskDeleteCmd)
taskDeleteCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
taskDeleteCmd.Flags().StringP("key", "k", "", "key of the task")
taskDeleteCmd.MarkFlagRequired("queue")
taskDeleteCmd.MarkFlagRequired("key")
taskCmd.AddCommand(taskRunCmd)
taskRunCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
taskRunCmd.Flags().StringP("key", "k", "", "key of the task")
taskRunCmd.MarkFlagRequired("queue")
taskRunCmd.MarkFlagRequired("key")
taskCmd.AddCommand(taskArchiveAllCmd)
taskArchiveAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
taskArchiveAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
taskArchiveAllCmd.MarkFlagRequired("queue")
taskArchiveAllCmd.MarkFlagRequired("state")
taskCmd.AddCommand(taskDeleteAllCmd)
taskDeleteAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
taskDeleteAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
taskDeleteAllCmd.MarkFlagRequired("queue")
taskDeleteAllCmd.MarkFlagRequired("state")
taskCmd.AddCommand(taskRunAllCmd)
taskRunAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
taskRunAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
taskRunAllCmd.MarkFlagRequired("queue")
taskRunAllCmd.MarkFlagRequired("state")
}
var taskCmd = &cobra.Command{
Use: "task",
Short: "Manage tasks",
}
var taskListCmd = &cobra.Command{
Use: "ls --queue=QUEUE --state=STATE",
Short: "List tasks",
Long: `List tasks of the given state from the specified queue.
The value for the state flag should be one of:
- active
- pending
- scheduled
- retry
- archived
List operation paginates the result set.
By default, the command fetches the first 30 tasks.
Use --page and --size flags to specify the page number and size.
Example:
To list pending tasks from "default" queue, run
asynq task ls --queue=default --state=pending
To list the tasks from the second page, run
asynq task ls --queue=default --state=pending --page=2`,
Run: taskList,
}
var taskCancelCmd = &cobra.Command{
Use: "cancel TASK_ID [TASK_ID...]",
Short: "Cancel one or more active tasks",
Args: cobra.MinimumNArgs(1),
Run: taskCancel,
}
var taskArchiveCmd = &cobra.Command{
Use: "archive --queue=QUEUE --key=KEY",
Short: "Archive a task with the given key",
Args: cobra.NoArgs,
Run: taskArchive,
}
var taskDeleteCmd = &cobra.Command{
Use: "delete --queue=QUEUE --key=KEY",
Short: "Delete a task with the given key",
Args: cobra.NoArgs,
Run: taskDelete,
}
var taskRunCmd = &cobra.Command{
Use: "run --queue=QUEUE --key=KEY",
Short: "Run a task with the given key",
Args: cobra.NoArgs,
Run: taskRun,
}
var taskArchiveAllCmd = &cobra.Command{
Use: "archive-all --queue=QUEUE --state=STATE",
Short: "Archive all tasks in the given state",
Args: cobra.NoArgs,
Run: taskArchiveAll,
}
var taskDeleteAllCmd = &cobra.Command{
Use: "delete-all --queue=QUEUE --state=STATE",
Short: "Delete all tasks in the given state",
Args: cobra.NoArgs,
Run: taskDeleteAll,
}
var taskRunAllCmd = &cobra.Command{
Use: "run-all --queue=QUEUE --state=STATE",
Short: "Run all tasks in the given state",
Args: cobra.NoArgs,
Run: taskRunAll,
}
func taskList(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
state, err := cmd.Flags().GetString("state")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
pageNum, err := cmd.Flags().GetInt("page")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
pageSize, err := cmd.Flags().GetInt("size")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
switch state {
case "active":
listActiveTasks(qname, pageNum, pageSize)
case "pending":
listPendingTasks(qname, pageNum, pageSize)
case "scheduled":
listScheduledTasks(qname, pageNum, pageSize)
case "retry":
listRetryTasks(qname, pageNum, pageSize)
case "archived":
listArchivedTasks(qname, pageNum, pageSize)
default:
fmt.Printf("error: state=%q is not supported\n", state)
os.Exit(1)
}
}
func listActiveTasks(qname string, pageNum, pageSize int) {
i := createInspector()
tasks, err := i.ListActiveTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Printf("No active tasks in %q queue\n", qname)
return
}
printTable(
[]string{"ID", "Type", "Payload"},
func(w io.Writer, tmpl string) {
for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload)
}
},
)
}
func listPendingTasks(qname string, pageNum, pageSize int) {
i := createInspector()
tasks, err := i.ListPendingTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Printf("No pending tasks in %q queue\n", qname)
return
}
printTable(
[]string{"Key", "Type", "Payload"},
func(w io.Writer, tmpl string) {
for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload)
}
},
)
}
func listScheduledTasks(qname string, pageNum, pageSize int) {
i := createInspector()
tasks, err := i.ListScheduledTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Printf("No scheduled tasks in %q queue\n", qname)
return
}
printTable(
[]string{"Key", "Type", "Payload", "Process In"},
func(w io.Writer, tmpl string) {
for _, t := range tasks {
processIn := fmt.Sprintf("%.0f seconds",
t.NextProcessAt.Sub(time.Now()).Seconds())
fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, processIn)
}
},
)
}
func listRetryTasks(qname string, pageNum, pageSize int) {
i := createInspector()
tasks, err := i.ListRetryTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Printf("No retry tasks in %q queue\n", qname)
return
}
printTable(
[]string{"Key", "Type", "Payload", "Next Retry", "Last Error", "Retried", "Max Retry"},
func(w io.Writer, tmpl string) {
for _, t := range tasks {
var nextRetry string
if d := t.NextProcessAt.Sub(time.Now()); d > 0 {
nextRetry = fmt.Sprintf("in %v", d.Round(time.Second))
} else {
nextRetry = "right now"
}
fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, nextRetry, t.LastError, t.Retried, t.MaxRetry)
}
},
)
}
func listArchivedTasks(qname string, pageNum, pageSize int) {
i := createInspector()
tasks, err := i.ListArchivedTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Printf("No archived tasks in %q queue\n", qname)
return
}
printTable(
[]string{"Key", "Type", "Payload", "Last Failed", "Last Error"},
func(w io.Writer, tmpl string) {
for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, t.LastFailedAt, t.LastError)
}
})
}
func taskCancel(cmd *cobra.Command, args []string) {
r := createRDB()
for _, id := range args {
err := r.PublishCancelation(id)
if err != nil {
fmt.Printf("error: could not send cancelation signal: %v\n", err)
continue
}
fmt.Printf("Sent cancelation signal for task %s\n", id)
}
}
func taskArchive(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
key, err := cmd.Flags().GetString("key")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
i := createInspector()
err = i.ArchiveTaskByKey(qname, key)
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
fmt.Println("task archived")
}
func taskDelete(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
key, err := cmd.Flags().GetString("key")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
i := createInspector()
err = i.DeleteTaskByKey(qname, key)
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
fmt.Println("task deleted")
}
func taskRun(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
key, err := cmd.Flags().GetString("key")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
i := createInspector()
err = i.RunTaskByKey(qname, key)
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
fmt.Println("task is now pending")
}
func taskArchiveAll(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
state, err := cmd.Flags().GetString("state")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
i := createInspector()
var n int
switch state {
case "pending":
n, err = i.ArchiveAllPendingTasks(qname)
case "scheduled":
n, err = i.ArchiveAllScheduledTasks(qname)
case "retry":
n, err = i.ArchiveAllRetryTasks(qname)
default:
fmt.Printf("error: unsupported state %q\n", state)
os.Exit(1)
}
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
fmt.Printf("%d tasks archived\n", n)
}
func taskDeleteAll(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
state, err := cmd.Flags().GetString("state")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
i := createInspector()
var n int
switch state {
case "pending":
n, err = i.DeleteAllPendingTasks(qname)
case "scheduled":
n, err = i.DeleteAllScheduledTasks(qname)
case "retry":
n, err = i.DeleteAllRetryTasks(qname)
case "archived":
n, err = i.DeleteAllArchivedTasks(qname)
default:
fmt.Printf("error: unsupported state %q\n", state)
os.Exit(1)
}
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
fmt.Printf("%d tasks deleted\n", n)
}
func taskRunAll(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
state, err := cmd.Flags().GetString("state")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
i := createInspector()
var n int
switch state {
case "scheduled":
n, err = i.RunAllScheduledTasks(qname)
case "retry":
n, err = i.RunAllRetryTasks(qname)
case "archived":
n, err = i.RunAllArchivedTasks(qname)
default:
fmt.Printf("error: unsupported state %q\n", state)
os.Exit(1)
}
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
fmt.Printf("%d tasks are now pending\n", n)
}


@@ -0,0 +1,46 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// unpauseCmd represents the unpause command
var unpauseCmd = &cobra.Command{
Use: "unpause [queue name]",
Short: "Unpauses the specified queue",
Long: `Unpause (asynq unpause) will unpause the specified queue.
Asynq servers will process tasks from unpaused/resumed queues.
Example: asynq unpause default -> Resume the "default" queue`,
Args: cobra.ExactValidArgs(1),
Run: unpause,
}
func init() {
rootCmd.AddCommand(unpauseCmd)
}
func unpause(cmd *cobra.Command, args []string) {
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := rdb.NewRDB(c)
err := r.Unpause(args[0])
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
fmt.Printf("Successfully resumed queue %q\n", args[0])
}
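The handler above wraps rdb.(*RDB).Unpause. A minimal round-trip sketch pairing it with the companion Pause call used by the asynq pause command (the Pause method is an assumption drawn from that sibling command, and the snippet only compiles from within the asynq module tree, as this CLI package does, because internal/rdb is an internal package):

package main

import (
    "log"

    "github.com/go-redis/redis/v7"
    "github.com/hibiken/asynq/internal/rdb"
)

func main() {
    r := rdb.NewRDB(redis.NewClient(&redis.Options{Addr: "localhost:6379"}))
    // Pause is assumed from the companion `asynq pause` command; Unpause is the call made above.
    if err := r.Pause("default"); err != nil {
        log.Fatal(err)
    }
    // While paused, servers skip the "default" queue when dequeueing.
    if err := r.Unpause("default"); err != nil {
        log.Fatal(err)
    }
}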


@@ -0,0 +1,75 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"io"
"os"
"sort"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// workersCmd represents the workers command
var workersCmd = &cobra.Command{
Use: "workers",
Short: "Show information about all running workers",
Long: `Workers (asynq workers) shows information about all running workers.
The command shows the following for each worker:
* Process in which the worker is running
* ID of the task the worker is processing
* Type of the task the worker is processing
* Payload of the task the worker is processing
* Queue that the task was pulled from
* Time the worker started processing the task`,
Args: cobra.NoArgs,
Run: workers,
}
func init() {
rootCmd.AddCommand(workersCmd)
}
func workers(cmd *cobra.Command, args []string) {
r := rdb.NewRDB(redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
}))
workers, err := r.ListWorkers()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(workers) == 0 {
fmt.Println("No workers")
return
}
// sort by started timestamp or ID.
sort.Slice(workers, func(i, j int) bool {
x, y := workers[i], workers[j]
if x.Started != y.Started {
return x.Started.Before(y.Started)
}
return x.ID < y.ID
})
cols := []string{"Process", "ID", "Type", "Payload", "Queue", "Started"}
printRows := func(w io.Writer, tmpl string) {
for _, wk := range workers {
fmt.Fprintf(w, tmpl,
fmt.Sprintf("%s:%d", wk.Host, wk.PID), wk.ID, wk.Type, wk.Payload, wk.Queue, timeAgo(wk.Started))
}
}
printTable(cols, printRows)
}
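timeAgo, used for the Started column above, is a helper defined elsewhere in this package and not shown in this diff. A minimal sketch of what such a helper might look like (purely an assumption about its shape, not the repo's implementation):

package main

import (
    "fmt"
    "time"
)

// timeAgoSketch formats how long ago t was, e.g. "1m30s ago".
// Hypothetical stand-in for the package's own timeAgo helper.
func timeAgoSketch(t time.Time) string {
    return fmt.Sprintf("%v ago", time.Since(t).Round(time.Second))
}

func main() {
    fmt.Println(timeAgoSketch(time.Now().Add(-90 * time.Second))) // "1m30s ago"
}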


@@ -3,20 +3,13 @@ module github.com/hibiken/asynq/tools
go 1.13
require (
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect
github.com/coreos/go-etcd v2.0.0+incompatible // indirect
github.com/cpuguy83/go-md2man v1.0.10 // indirect
github.com/fatih/color v1.9.0
github.com/go-redis/redis/v7 v7.4.0
github.com/go-redis/redis/v7 v7.2.0
github.com/google/uuid v1.1.1
github.com/hibiken/asynq v0.14.0
github.com/hibiken/asynq v0.4.0
github.com/mitchellh/go-homedir v1.1.0
github.com/spf13/cast v1.3.1
github.com/spf13/cobra v1.1.1
github.com/spf13/viper v1.7.0
github.com/ugorji/go v1.1.4 // indirect
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 // indirect
github.com/spf13/cobra v0.0.5
github.com/spf13/viper v1.6.2
)
replace github.com/hibiken/asynq => ./..


@@ -1,122 +1,62 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -132,27 +72,11 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -160,14 +84,11 @@ github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -177,15 +98,8 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -200,27 +114,16 @@ github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -229,137 +132,57 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
@@ -373,10 +196,4 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=