Mirror of https://github.com/hibiken/asynq.git (synced 2025-10-21 09:36:12 +08:00)

Compare commits: 53 commits
Commits in this comparison (SHA1):

bc77f6fe14, efe197a47b, 97b5516183, 8eafa03ca7, 430b01c9aa, 14c381dc40, e13122723a, eba7c4e085, bfde0b6283, afde6a7266,
6529a1e0b1, c9a6ab8ae1, 557c1a5044, 0236eb9a1c, 3c2b2cf4a3, 04df71198d, 2884044e75, 3719fad396, 42c7ac0746, d331ff055d,
ccb682853e, 7c3ad9e45c, ea23db4f6b, 00a25ca570, 7235041128, a150d18ed7, 0712e90f23, c5100a9c23, 196d66f221, 38509e309f,
f4dd8fe962, c06e9de97d, 52d536a8f5, f9c0673116, b604d25937, dfdf530a24, e9239260ae, 8f9d5a3352, c4dc993241, 37dfd746d4,
8d6e4167ab, 476862dd7b, dcd873fa2a, 2604bb2192, 942345ee80, 1f059eeee1, 4ae73abdaa, 96b2318300, 8312515e64, 50e7f38365,
fadcae76d6, a2d4ead989, 82b6828f43
`.github/workflows/benchstat.yml` (new file, 82 lines)

```yaml
# This workflow runs benchmarks against the current branch,
# compares them to benchmarks against master,
# and uploads the results as an artifact.

name: benchstat

on: [pull_request]

jobs:
  incoming:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Benchmark
        run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... | tee -a new.txt
      - name: Upload Benchmark
        uses: actions/upload-artifact@v2
        with:
          name: bench-incoming
          path: new.txt

  current:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          ref: master
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Benchmark
        run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... | tee -a old.txt
      - name: Upload Benchmark
        uses: actions/upload-artifact@v2
        with:
          name: bench-current
          path: old.txt

  benchstat:
    needs: [incoming, current]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Install benchstat
        run: go get -u golang.org/x/perf/cmd/benchstat
      - name: Download Incoming
        uses: actions/download-artifact@v2
        with:
          name: bench-incoming
      - name: Download Current
        uses: actions/download-artifact@v2
        with:
          name: bench-current
      - name: Benchstat Results
        run: benchstat old.txt new.txt | tee -a benchstat.txt
      - name: Upload benchstat results
        uses: actions/upload-artifact@v2
        with:
          name: benchstat
          path: benchstat.txt
```
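The two benchmark jobs only run functions with the standard `Benchmark*` signature (`-run=^$` skips the unit tests), and `benchstat` then compares the `old.txt`/`new.txt` artifacts. A minimal sketch of the kind of function this pipeline measures; the `noopWork` workload and names are illustrative, not taken from the repository:

```go
package bench_sketch // hypothetical package, for illustration only

import "testing"

// noopWork stands in for a unit of work; the real benchmarks in this repository
// enqueue and process tasks against the Redis service the workflow starts.
func noopWork() int {
	sum := 0
	for i := 0; i < 1000; i++ {
		sum += i
	}
	return sum
}

// BenchmarkNoopWork has the shape `go test -bench=. -count=5` expects; running it
// five times per branch gives benchstat enough samples to report a delta.
func BenchmarkNoopWork(b *testing.B) {
	for i := 0; i < b.N; i++ {
		noopWork()
	}
}
```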
`.github/workflows/build.yml` (new file, 35 lines)

```yaml
name: build

on: [push, pull_request]

jobs:
  build:
    strategy:
      matrix:
        os: [ubuntu-latest]
        go-version: [1.13.x, 1.14.x, 1.15.x]
    runs-on: ${{ matrix.os }}
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - uses: actions/checkout@v2

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}

      - name: Build
        run: go build -v ./...

      - name: Test
        run: go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...

      - name: Benchmark Test
        run: go test -run=^$ -bench=. -loglevel=debug ./...

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
```
`.travis.yml` (deleted, 13 lines)

```yaml
language: go
go_import_path: github.com/hibiken/asynq
git:
  depth: 1
go: [1.13.x, 1.14.x, 1.15.x]
script:
  - go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
  - go test -run=XXX -bench=. -loglevel=debug ./...
services:
  - redis-server
after_success:
  - bash ./.travis/benchcmp.sh
  - bash <(curl -s https://codecov.io/bash)
```
Deleted 18-line Travis benchmark script (the `./.travis/benchcmp.sh` referenced above)

```sh
if [ "${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" != "master" ]; then
    REMOTE_URL="$(git config --get remote.origin.url)";
    cd ${TRAVIS_BUILD_DIR}/.. && \
    git clone ${REMOTE_URL} "${TRAVIS_REPO_SLUG}-bench" && \
    cd "${TRAVIS_REPO_SLUG}-bench" && \

    # Benchmark master
    git checkout master && \
    go test -run=XXX -bench=. ./... > master.txt && \

    # Benchmark feature branch
    git checkout ${TRAVIS_COMMIT} && \
    go test -run=XXX -bench=. ./... > feature.txt && \

    # compare two benchmarks
    go get -u golang.org/x/tools/cmd/benchcmp && \
    benchcmp master.txt feature.txt;
fi
```
`CHANGELOG.md` (87 lines changed)

The diff adds release notes for v0.13.0 through v0.15.0 under the `## [Unreleased]` heading and lightly reformats the existing v0.12.0 section (emphasis and table style). The resulting entries:

```markdown
## [0.15.0] - 2021-01-31

**IMPORTANT**: All `Inspector` related code is moved to subpackage "github.com/hibiken/asynq/inspeq".

### Changed

- `Inspector` related code is moved to subpackage "github.com/hibiken/asynq/inspeq".
- `RedisConnOpt` interface has changed slightly. If you have been passing `RedisClientOpt`, `RedisFailoverClientOpt`, or `RedisClusterClientOpt` as a pointer,
  update your code to pass it as a value.
- `ErrorMsg` field in `RetryTask` and `ArchivedTask` was renamed to `LastError`.

### Added

- `MaxRetry`, `Retried`, `LastError` fields were added to all task types returned from `Inspector`.
- `MemoryUsage` field was added to `QueueStats`.
- `DeleteAllPendingTasks` and `ArchiveAllPendingTasks` were added to `Inspector`.
- `DeleteTaskByKey` and `ArchiveTaskByKey` now support deleting/archiving `PendingTask`.
- asynq CLI now supports deleting/archiving pending tasks.

## [0.14.1] - 2021-01-19

### Fixed

- `go.mod` file for CLI

## [0.14.0] - 2021-01-14

**IMPORTANT**: Please run the `asynq migrate` command to migrate from the previous versions.

### Changed

- Renamed `DeadTask` to `ArchivedTask`.
- Renamed the operation `Kill` to `Archive` in `Inspector`.
- Print stack trace when Handler panics.
- Include a file name and a line number in the error message when recovering from a panic.

### Added

- `DefaultRetryDelayFunc` is now a public API, which can be used in a custom `RetryDelayFunc`.
- `SkipRetry` error is added to be used as a return value from `Handler`.
- `Servers` method is added to `Inspector`.
- `CancelActiveTask` method is added to `Inspector`.
- `ListSchedulerEnqueueEvents` method is added to `Inspector`.
- `SchedulerEntries` method is added to `Inspector`.
- `DeleteQueue` method is added to `Inspector`.

## [0.13.1] - 2020-11-22

### Fixed

- Fixed processor to wait for the specified time duration before forcefully shutting down workers.

## [0.13.0] - 2020-10-13

### Added

- `Scheduler` type is added to enable periodic tasks. See the godoc for its APIs and the [wiki](https://github.com/hibiken/asynq/wiki/Periodic-Tasks) for the getting-started guide.

### Changed

- Interface `Option` has changed. See the godoc for the new interface.
  This change has no impact as long as you are using exported functions (e.g. `MaxRetry`, `Queue`, etc.)
  to create `Option`s.

### Added

- `Payload.String() string` method is added
- `Payload.MarshalJSON() ([]byte, error)` method is added

## [0.12.0] - 2020-09-12

**IMPORTANT**: If you are upgrading from a previous version, please install the latest version of the CLI `go get -u github.com/hibiken/asynq/tools/asynq` and run the `asynq migrate` command. No process should be writing to Redis while you run the migration command.

## The semantics of queue have changed

Previously, we called tasks that are ready to be processed _"Enqueued tasks"_, and other tasks that are scheduled to be processed in the future _"Scheduled tasks"_, etc.
We changed the semantics of _"Enqueue"_ slightly; all tasks that the client pushes to Redis are _Enqueued_ to a queue. Within a queue, tasks will transition from one state to another.
Possible task states are:

- `Pending`: task is ready to be processed (previously called "Enqueued")
- `Active`: task is currently being processed (previously called "InProgress")
- `Scheduled`: task is scheduled to be processed in the future
```

The later hunks in the v0.12.0 section keep the existing content, with only table and emphasis reformatting:

```markdown
---

### Changed

#### `Client`

Use the `ProcessIn` or `ProcessAt` option to schedule a task instead of `EnqueueIn` or `EnqueueAt`.

| Previously                  | v0.12.0                                    |
| --------------------------- | ------------------------------------------ |
| `client.EnqueueAt(t, task)` | `client.Enqueue(task, asynq.ProcessAt(t))` |
| `client.EnqueueIn(d, task)` | `client.Enqueue(task, asynq.ProcessIn(d))` |

#### `Inspector`

All Inspector methods are scoped to a queue, and the methods take `qname (string)` as the first argument.
`EnqueuedTask` is renamed to `PendingTask`, along with its corresponding methods.
`InProgressTask` is renamed to `ActiveTask`, along with its corresponding methods.
The command "Enqueue" is replaced by the verb "Run" (e.g. `EnqueueAllScheduledTasks` --> `RunAllScheduledTasks`).

#### `CLI`

CLI commands are restructured to use subcommands. Commands are organized into a few management commands.
To view details on any command, use `asynq help <command> <subcommand>`.

- `asynq stats`
- `asynq queue [ls inspect history rm pause unpause]`
- `asynq task [ls cancel delete kill run delete-all kill-all run-all]`

### Added

#### `RedisConnOpt`

- `RedisClusterClientOpt` is added to connect to Redis Cluster.
- `Username` field is added to all `RedisConnOpt` types in order to authenticate the connection when Redis ACLs are used.

#### `Client`

- `ProcessIn(d time.Duration) Option` and `ProcessAt(t time.Time) Option` are added to replace the `EnqueueIn` and `EnqueueAt` functionality.

#### `Inspector`

- `Queues() ([]string, error)` method is added to get all queue names.
- `ClusterKeySlot(qname string) (int64, error)` method is added to get a queue's hash slot in the Redis cluster.
- `ClusterNodes(qname string) ([]ClusterNode, error)` method is added to get a list of Redis cluster nodes for the given queue.
- `Close() error` method is added to close the connection with redis.

### `Handler`

- `GetQueueName(ctx context.Context) (string, bool)` helper is added to extract the queue name from a context.

## [0.11.0] - 2020-07-28
```

The final hunk only removes a stray blank line around:

```markdown
- Fixes the JSON number overflow issue (https://github.com/hibiken/asynq/issues/166).

## [0.9.2] - 2020-06-08

### Added
```
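A minimal sketch of the v0.12.0 client migration summarized in the table above; the task type and payload are illustrative, while the `Enqueue`/`ProcessAt`/`ProcessIn` calls are the ones named in the changelog:

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("email:welcome", map[string]interface{}{"user_id": 42})

	// Before v0.12.0: client.EnqueueIn(24*time.Hour, task)
	// From v0.12.0 on, scheduling is expressed as an Option on Enqueue:
	if _, err := client.Enqueue(task, asynq.ProcessIn(24*time.Hour)); err != nil {
		log.Fatal(err)
	}

	// Before v0.12.0: client.EnqueueAt(t, task)
	at := time.Now().Add(48 * time.Hour)
	if _, err := client.Enqueue(task, asynq.ProcessAt(at)); err != nil {
		log.Fatal(err)
	}
}
```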
`README.md` (14 lines changed)

- The badge row is rebuilt: the Travis CI build badge (linking to https://travis-ci.com/hibiken/asynq) is replaced by an un-linked badge image (likely the new GitHub Actions `build` workflow badge), the codecov badge (linking to https://codecov.io/gh/hibiken/asynq) is dropped, and the GoDoc, Go Report Card, MIT license, and Gitter badges are kept and reordered.
- The feature list gains `- [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks)` between the queue-pause entry and the Redis Cluster / Redis Sentinels / CLI entries.
- In the handler example, the struct `ImageProcesser` is renamed to `ImageProcessor` (typo fix); the `// ImageProcessor implements asynq.Handler interface.` comment and the struct fields are unchanged.
- The client example's import block gains `"fmt"` and `"log"` alongside the existing `"time"` and `"github.com/hibiken/asynq"`.
- The default-options call changes from `c.SetDefaultOptions(tasks.ImageProcessing, asynq.MaxRetry(10), asynq.Timeout(3*time.Minute))` to `c.SetDefaultOptions(tasks.TypeImageResize, asynq.MaxRetry(10), asynq.Timeout(3*time.Minute))`; the following `t = tasks.NewImageResizeTask("some/blobstore/path")` and `res, err = c.Enqueue(t)` lines are unchanged.
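The renamed constant in the last hunk comes from the README's example `tasks` package. A self-contained sketch of the same default-options pattern, with the task type constant and payload defined inline as assumptions rather than taken from that package:

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// TypeImageResize stands in for the tasks.TypeImageResize constant from the
// README's example package; the string value here is an assumption.
const TypeImageResize = "image:resize"

func main() {
	c := asynq.NewClient(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})
	defer c.Close()

	// Defaults apply to every task of this type unless overridden at Enqueue time.
	c.SetDefaultOptions(TypeImageResize, asynq.MaxRetry(10), asynq.Timeout(3*time.Minute))

	t := asynq.NewTask(TypeImageResize, map[string]interface{}{"src": "some/blobstore/path"})
	if _, err := c.Enqueue(t); err != nil {
		log.Fatal(err)
	}
}
```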
`asynq.go` (109 lines changed)

`RedisConnOpt` becomes an interface with a single `MakeRedisClient` method, each connection-option type implements it, and the old `createRedisClient` switch is removed:

```diff
@@ -40,7 +40,11 @@ func NewTask(typename string, payload map[string]interface{}) *Task {
 // - RedisClientOpt
 // - RedisFailoverClientOpt
 // - RedisClusterClientOpt
-type RedisConnOpt interface{}
+type RedisConnOpt interface {
+	// MakeRedisClient returns a new redis client instance.
+	// Return value is intentionally opaque to hide the implementation detail of redis client.
+	MakeRedisClient() interface{}
+}
 
 // RedisClientOpt is used to create a redis client that connects
 // to a redis server directly.
@@ -73,6 +77,18 @@ type RedisClientOpt struct {
 	TLSConfig *tls.Config
 }
 
+func (opt RedisClientOpt) MakeRedisClient() interface{} {
+	return redis.NewClient(&redis.Options{
+		Network:   opt.Network,
+		Addr:      opt.Addr,
+		Username:  opt.Username,
+		Password:  opt.Password,
+		DB:        opt.DB,
+		PoolSize:  opt.PoolSize,
+		TLSConfig: opt.TLSConfig,
+	})
+}
+
 // RedisFailoverClientOpt is used to creates a redis client that talks
 // to redis sentinels for service discovery and has an automatic failover
 // capability.
@@ -109,6 +125,19 @@ type RedisFailoverClientOpt struct {
 	TLSConfig *tls.Config
 }
 
+func (opt RedisFailoverClientOpt) MakeRedisClient() interface{} {
+	return redis.NewFailoverClient(&redis.FailoverOptions{
+		MasterName:       opt.MasterName,
+		SentinelAddrs:    opt.SentinelAddrs,
+		SentinelPassword: opt.SentinelPassword,
+		Username:         opt.Username,
+		Password:         opt.Password,
+		DB:               opt.DB,
+		PoolSize:         opt.PoolSize,
+		TLSConfig:        opt.TLSConfig,
+	})
+}
+
 // RedisFailoverClientOpt is used to creates a redis client that connects to
 // redis cluster.
 type RedisClusterClientOpt struct {
@@ -133,6 +162,16 @@ type RedisClusterClientOpt struct {
 	TLSConfig *tls.Config
 }
 
+func (opt RedisClusterClientOpt) MakeRedisClient() interface{} {
+	return redis.NewClusterClient(&redis.ClusterOptions{
+		Addrs:        opt.Addrs,
+		MaxRedirects: opt.MaxRedirects,
+		Username:     opt.Username,
+		Password:     opt.Password,
+		TLSConfig:    opt.TLSConfig,
+	})
+}
+
 // ParseRedisURI parses redis uri string and returns RedisConnOpt if uri is valid.
 // It returns a non-nil error if uri cannot be parsed.
 //
@@ -205,71 +244,3 @@ func parseRedisSentinelURI(u *url.URL) (RedisConnOpt, error) {
 	}
 	return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, Password: password}, nil
 }
-
-// createRedisClient returns a redis client given a redis connection configuration.
-//
-// Passing an unexpected type as a RedisConnOpt argument will cause panic.
-func createRedisClient(r RedisConnOpt) redis.UniversalClient {
-	switch r := r.(type) {
-	case *RedisClientOpt:
-		return redis.NewClient(&redis.Options{
-			Network:   r.Network,
-			Addr:      r.Addr,
-			Username:  r.Username,
-			Password:  r.Password,
-			DB:        r.DB,
-			PoolSize:  r.PoolSize,
-			TLSConfig: r.TLSConfig,
-		})
-	case RedisClientOpt:
-		return redis.NewClient(&redis.Options{
-			Network:   r.Network,
-			Addr:      r.Addr,
-			Username:  r.Username,
-			Password:  r.Password,
-			DB:        r.DB,
-			PoolSize:  r.PoolSize,
-			TLSConfig: r.TLSConfig,
-		})
-	case *RedisFailoverClientOpt:
-		return redis.NewFailoverClient(&redis.FailoverOptions{
-			MasterName:       r.MasterName,
-			SentinelAddrs:    r.SentinelAddrs,
-			SentinelPassword: r.SentinelPassword,
-			Username:         r.Username,
-			Password:         r.Password,
-			DB:               r.DB,
-			PoolSize:         r.PoolSize,
-			TLSConfig:        r.TLSConfig,
-		})
-	case RedisFailoverClientOpt:
-		return redis.NewFailoverClient(&redis.FailoverOptions{
-			MasterName:       r.MasterName,
-			SentinelAddrs:    r.SentinelAddrs,
-			SentinelPassword: r.SentinelPassword,
-			Username:         r.Username,
-			Password:         r.Password,
-			DB:               r.DB,
-			PoolSize:         r.PoolSize,
-			TLSConfig:        r.TLSConfig,
-		})
-	case RedisClusterClientOpt:
-		return redis.NewClusterClient(&redis.ClusterOptions{
-			Addrs:        r.Addrs,
-			MaxRedirects: r.MaxRedirects,
-			Username:     r.Username,
-			Password:     r.Password,
-			TLSConfig:    r.TLSConfig,
-		})
-	case *RedisClusterClientOpt:
-		return redis.NewClusterClient(&redis.ClusterOptions{
-			Addrs:        r.Addrs,
-			MaxRedirects: r.MaxRedirects,
-			Username:     r.Username,
-			Password:     r.Password,
-			TLSConfig:    r.TLSConfig,
-		})
-	default:
-		panic(fmt.Sprintf("asynq: unexpected type %T for RedisConnOpt", r))
-	}
-}
```
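With `RedisConnOpt` now an interface, each concrete option type produces its own client via `MakeRedisClient`, and callers that previously relied on `createRedisClient` type-assert the opaque return value instead, which is exactly what the new `NewClient` in the next file does. A minimal sketch of that pattern (per the v0.15.0 changelog, the option is passed as a value, not a pointer):

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
	"github.com/hibiken/asynq"
)

func main() {
	// RedisClientOpt is passed by value, as the v0.15.0 changelog advises.
	var opt asynq.RedisConnOpt = asynq.RedisClientOpt{Addr: "localhost:6379", DB: 1}

	// MakeRedisClient intentionally returns interface{}; the concrete value is a
	// go-redis client, so assert it to redis.UniversalClient before using it.
	c, ok := opt.MakeRedisClient().(redis.UniversalClient)
	if !ok {
		panic(fmt.Sprintf("unexpected client type for %T", opt))
	}
	fmt.Println(c.Ping().Err())
}
```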
`client.go` (82 lines changed)

`NewClient` now obtains its redis client through the `RedisConnOpt` interface, `Option` becomes a three-method interface backed by an `OptionType` enum, every option constructor gains `String`/`Type`/`Value` methods, queue-name validation moves to `base.ValidateQueueName`, and `Result` gains an `EnqueuedAt` field:

```diff
@@ -11,6 +11,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/go-redis/redis/v7"
 	"github.com/google/uuid"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/rdb"
@@ -28,17 +29,42 @@ type Client struct {
 	rdb *rdb.RDB
 }
 
-// NewClient and returns a new Client given a redis connection option.
+// NewClient returns a new Client instance given a redis connection option.
 func NewClient(r RedisConnOpt) *Client {
-	rdb := rdb.NewRDB(createRedisClient(r))
+	c, ok := r.MakeRedisClient().(redis.UniversalClient)
+	if !ok {
+		panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
+	}
+	rdb := rdb.NewRDB(c)
 	return &Client{
 		opts: make(map[string][]Option),
 		rdb:  rdb,
 	}
 }
 
+type OptionType int
+
+const (
+	MaxRetryOpt OptionType = iota
+	QueueOpt
+	TimeoutOpt
+	DeadlineOpt
+	UniqueOpt
+	ProcessAtOpt
+	ProcessInOpt
+)
+
 // Option specifies the task processing behavior.
-type Option interface{}
+type Option interface {
+	// String returns a string representation of the option.
+	String() string
+
+	// Type describes the type of the option.
+	Type() OptionType
+
+	// Value returns a value used to create this option.
+	Value() interface{}
+}
 
 // Internal option representations.
 type (
@@ -62,13 +88,21 @@ func MaxRetry(n int) Option {
 	return retryOption(n)
 }
 
+func (n retryOption) String() string     { return fmt.Sprintf("MaxRetry(%d)", int(n)) }
+func (n retryOption) Type() OptionType   { return MaxRetryOpt }
+func (n retryOption) Value() interface{} { return int(n) }
+
 // Queue returns an option to specify the queue to enqueue the task into.
 //
 // Queue name is case-insensitive and the lowercased version is used.
-func Queue(name string) Option {
-	return queueOption(strings.ToLower(name))
+func Queue(qname string) Option {
+	return queueOption(strings.ToLower(qname))
 }
 
+func (qname queueOption) String() string     { return fmt.Sprintf("Queue(%q)", string(qname)) }
+func (qname queueOption) Type() OptionType   { return QueueOpt }
+func (qname queueOption) Value() interface{} { return string(qname) }
+
 // Timeout returns an option to specify how long a task may run.
 // If the timeout elapses before the Handler returns, then the task
 // will be retried.
@@ -81,6 +115,10 @@ func Timeout(d time.Duration) Option {
 	return timeoutOption(d)
 }
 
+func (d timeoutOption) String() string     { return fmt.Sprintf("Timeout(%v)", time.Duration(d)) }
+func (d timeoutOption) Type() OptionType   { return TimeoutOpt }
+func (d timeoutOption) Value() interface{} { return time.Duration(d) }
+
 // Deadline returns an option to specify the deadline for the given task.
 // If it reaches the deadline before the Handler returns, then the task
 // will be retried.
@@ -91,6 +129,12 @@ func Deadline(t time.Time) Option {
 	return deadlineOption(t)
 }
 
+func (t deadlineOption) String() string {
+	return fmt.Sprintf("Deadline(%v)", time.Time(t).Format(time.UnixDate))
+}
+func (t deadlineOption) Type() OptionType   { return DeadlineOpt }
+func (t deadlineOption) Value() interface{} { return time.Time(t) }
+
 // Unique returns an option to enqueue a task only if the given task is unique.
 // Task enqueued with this option is guaranteed to be unique within the given ttl.
 // Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued.
@@ -104,6 +148,10 @@ func Unique(ttl time.Duration) Option {
 	return uniqueOption(ttl)
 }
 
+func (ttl uniqueOption) String() string     { return fmt.Sprintf("Unique(%v)", time.Duration(ttl)) }
+func (ttl uniqueOption) Type() OptionType   { return UniqueOpt }
+func (ttl uniqueOption) Value() interface{} { return time.Duration(ttl) }
+
 // ProcessAt returns an option to specify when to process the given task.
 //
 // If there's a conflicting ProcessIn option, the last option passed to Enqueue overrides the others.
@@ -111,6 +159,12 @@ func ProcessAt(t time.Time) Option {
 	return processAtOption(t)
 }
 
+func (t processAtOption) String() string {
+	return fmt.Sprintf("ProcessAt(%v)", time.Time(t).Format(time.UnixDate))
+}
+func (t processAtOption) Type() OptionType   { return ProcessAtOpt }
+func (t processAtOption) Value() interface{} { return time.Time(t) }
+
 // ProcessIn returns an option to specify when to process the given task relative to the current time.
 //
 // If there's a conflicting ProcessAt option, the last option passed to Enqueue overrides the others.
@@ -118,6 +172,11 @@ func ProcessIn(d time.Duration) Option {
 	return processInOption(d)
 }
 
+func (d processInOption) String() string     { return fmt.Sprintf("ProcessIn(%v)", time.Duration(d)) }
+func (d processInOption) Type() OptionType   { return ProcessInOpt }
+func (d processInOption) Value() interface{} { return time.Duration(d) }
+
 // ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
 //
 // ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
@@ -150,7 +209,7 @@ func composeOptions(opts ...Option) (option, error) {
 			res.retry = int(opt)
 		case queueOption:
 			trimmed := strings.TrimSpace(string(opt))
-			if err := validateQueueName(trimmed); err != nil {
+			if err := base.ValidateQueueName(trimmed); err != nil {
 				return option{}, err
 			}
 			res.queue = trimmed
@@ -171,13 +230,6 @@ func composeOptions(opts ...Option) (option, error) {
 	return res, nil
 }
 
-func validateQueueName(qname string) error {
-	if len(qname) == 0 {
-		return fmt.Errorf("queue name must contain one or more characters")
-	}
-	return nil
-}
-
 const (
 	// Default max retry count used if nothing is specified.
 	defaultMaxRetry = 25
@@ -208,6 +260,9 @@ type Result struct {
 	// ID is a unique identifier for the task.
 	ID string
 
+	// EnqueuedAt is the time the task was enqueued in UTC.
+	EnqueuedAt time.Time
+
 	// ProcessAt indicates when the task should be processed.
 	ProcessAt time.Time
 
@@ -299,6 +354,7 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*Result, error) {
 	}
 	return &Result{
 		ID:         msg.ID.String(),
+		EnqueuedAt: time.Now().UTC(),
 		ProcessAt:  opt.processAt,
 		Queue:      msg.Queue,
 		Retry:      msg.Retry,
```
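Because every exported constructor now returns a value implementing the three-method `Option` interface, option values can be inspected programmatically. A short sketch using the constructors shown above; output formatting is illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	opts := []asynq.Option{
		asynq.Queue("critical"),
		asynq.MaxRetry(5),
		asynq.Timeout(30 * time.Second),
	}
	for _, opt := range opts {
		// String(), Type() and Value() are the methods added to the Option interface.
		fmt.Printf("%-20s type=%v value=%v\n", opt.String(), opt.Type(), opt.Value())
	}
}
```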
Accompanying test changes for the client (expected results now carry `EnqueuedAt`, and error messages reflect the `Enqueue(task, ProcessAt/ProcessIn(...))` API):

```diff
@@ -42,6 +42,7 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
 			processAt: now,
 			opts:      []Option{},
 			wantRes: &Result{
+				EnqueuedAt: now.UTC(),
 				ProcessAt: now,
 				Queue:     "default",
 				Retry:     defaultMaxRetry,
@@ -70,6 +71,7 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
 			processAt: oneHourLater,
 			opts:      []Option{},
 			wantRes: &Result{
+				EnqueuedAt: now.UTC(),
 				ProcessAt: oneHourLater,
 				Queue:     "default",
 				Retry:     defaultMaxRetry,
@@ -111,8 +113,8 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
 			cmpopts.EquateApproxTime(500 * time.Millisecond),
 		}
 		if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
-			t.Errorf("%s;\nEnqueueAt(processAt, task) returned %v, want %v; (-want,+got)\n%s",
-				tc.desc, gotRes, tc.wantRes, diff)
+			t.Errorf("%s;\nEnqueue(task, ProcessAt(%v)) returned %v, want %v; (-want,+got)\n%s",
+				tc.desc, tc.processAt, gotRes, tc.wantRes, diff)
 		}
 
 		for qname, want := range tc.wantPending {
@@ -366,7 +368,7 @@ func TestClientEnqueue(t *testing.T) {
 			continue
 		}
 		cmpOptions := []cmp.Option{
-			cmpopts.IgnoreFields(Result{}, "ID"),
+			cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"),
 			cmpopts.EquateApproxTime(500 * time.Millisecond),
 		}
 		if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
@@ -471,12 +473,12 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
 			continue
 		}
 		cmpOptions := []cmp.Option{
-			cmpopts.IgnoreFields(Result{}, "ID"),
+			cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"),
 			cmpopts.EquateApproxTime(500 * time.Millisecond),
 		}
 		if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
-			t.Errorf("%s;\nEnqueueIn(delay, task) returned %v, want %v; (-want,+got)\n%s",
-				tc.desc, gotRes, tc.wantRes, diff)
+			t.Errorf("%s;\nEnqueue(task, ProcessIn(%v)) returned %v, want %v; (-want,+got)\n%s",
+				tc.desc, tc.delay, gotRes, tc.wantRes, diff)
 		}
 
 		for qname, want := range tc.wantPending {
@@ -617,7 +619,7 @@ func TestClientDefaultOptions(t *testing.T) {
 			t.Fatal(err)
 		}
 		cmpOptions := []cmp.Option{
-			cmpopts.IgnoreFields(Result{}, "ID"),
+			cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"),
 			cmpopts.EquateApproxTime(500 * time.Millisecond),
 		}
 		if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
@@ -772,3 +774,4 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
 		}
 	}
 }
+
```
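The new `EnqueuedAt` field is set to `time.Now().UTC()` inside `Enqueue`, so the tests either ignore it or compare timestamps approximately. A small self-contained illustration of those two `go-cmp` options; the `result` struct here is a stand-in, not the asynq type:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type result struct {
	ID         string
	EnqueuedAt time.Time
}

func main() {
	want := result{EnqueuedAt: time.Now()}
	got := result{ID: "random-uuid", EnqueuedAt: time.Now().Add(100 * time.Millisecond)}

	opts := []cmp.Option{
		cmpopts.IgnoreFields(result{}, "ID"),             // nondeterministic, skipped entirely
		cmpopts.EquateApproxTime(500 * time.Millisecond), // timestamps only need to be close
	}
	// Prints an empty diff: with these options the two values compare equal.
	fmt.Println(cmp.Diff(want, got, opts...))
}
```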
Example updates (a new `ExampleScheduler` is added alongside the existing server examples):

```diff
@@ -9,6 +9,7 @@ import (
 	"log"
 	"os"
 	"os/signal"
+	"time"
 
 	"github.com/hibiken/asynq"
 	"golang.org/x/sys/unix"
@@ -78,6 +79,25 @@ func ExampleServer_Quiet() {
 	srv.Stop()
 }
 
+func ExampleScheduler() {
+	scheduler := asynq.NewScheduler(
+		asynq.RedisClientOpt{Addr: ":6379"},
+		&asynq.SchedulerOpts{Location: time.Local},
+	)
+
+	if _, err := scheduler.Register("* * * * *", asynq.NewTask("task1", nil)); err != nil {
+		log.Fatal(err)
+	}
+	if _, err := scheduler.Register("@every 30s", asynq.NewTask("task2", nil)); err != nil {
+		log.Fatal(err)
+	}
+
+	// Run blocks and waits for os signal to terminate the program.
+	if err := scheduler.Run(); err != nil {
+		log.Fatal(err)
+	}
+}
+
 func ExampleParseRedisURI() {
 	rconn, err := asynq.ParseRedisURI("redis://localhost:6379/10")
 	if err != nil {
```
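`ExampleScheduler` only registers and enqueues tasks on a cron schedule; a separate worker process consumes them. A minimal sketch of that consuming side, assuming the server and mux API of the same release (`NewServer`, `NewServeMux`, `HandleFunc`) and the `task1`/`task2` type names used above:

```go
package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: ":6379"},
		asynq.Config{Concurrency: 10},
	)

	mux := asynq.NewServeMux()
	mux.HandleFunc("task1", func(ctx context.Context, t *asynq.Task) error {
		log.Printf("processing %q", t.Type) // runs every minute, enqueued by the scheduler
		return nil
	})
	mux.HandleFunc("task2", func(ctx context.Context, t *asynq.Task) error {
		log.Printf("processing %q", t.Type) // runs every 30 seconds
		return nil
	})

	if err := srv.Run(mux); err != nil {
		log.Fatal(err)
	}
}
```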
`forwarder.go` (new file, 75 lines)

```go
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"sync"
	"time"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/log"
)

// A forwarder is responsible for moving scheduled and retry tasks to pending state
// so that the tasks get processed by the workers.
type forwarder struct {
	logger *log.Logger
	broker base.Broker

	// channel to communicate back to the long running "forwarder" goroutine.
	done chan struct{}

	// list of queue names to check and enqueue.
	queues []string

	// poll interval on average
	avgInterval time.Duration
}

type forwarderParams struct {
	logger   *log.Logger
	broker   base.Broker
	queues   []string
	interval time.Duration
}

func newForwarder(params forwarderParams) *forwarder {
	return &forwarder{
		logger:      params.logger,
		broker:      params.broker,
		done:        make(chan struct{}),
		queues:      params.queues,
		avgInterval: params.interval,
	}
}

func (f *forwarder) terminate() {
	f.logger.Debug("Forwarder shutting down...")
	// Signal the forwarder goroutine to stop polling.
	f.done <- struct{}{}
}

// start starts the "forwarder" goroutine.
func (f *forwarder) start(wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-f.done:
				f.logger.Debug("Forwarder done")
				return
			case <-time.After(f.avgInterval):
				f.exec()
			}
		}
	}()
}

func (f *forwarder) exec() {
	if err := f.broker.CheckAndEnqueue(f.queues...); err != nil {
		f.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
	}
}
```
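The forwarder follows the package's usual lifecycle for background goroutines: `start` registers with a `WaitGroup` and polls on an interval, while `terminate` sends on `done` to stop it. A standalone sketch of just that pattern, with generic names rather than the asynq internals:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type poller struct {
	done     chan struct{}
	interval time.Duration
	exec     func()
}

func (p *poller) start(wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-p.done: // terminate() signalled; stop polling.
				return
			case <-time.After(p.interval):
				p.exec()
			}
		}
	}()
}

func (p *poller) terminate() { p.done <- struct{}{} }

func main() {
	var wg sync.WaitGroup
	p := &poller{
		done:     make(chan struct{}),
		interval: 100 * time.Millisecond,
		exec:     func() { fmt.Println("checking scheduled and retry sets...") },
	}
	p.start(&wg)
	time.Sleep(350 * time.Millisecond)
	p.terminate()
	wg.Wait()
}
```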
`forwarder_test.go` (new file, 137 lines)

```go
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"sync"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

func TestForwarder(t *testing.T) {
	r := setup(t)
	defer r.Close()
	rdbClient := rdb.NewRDB(r)
	const pollInterval = time.Second
	s := newForwarder(forwarderParams{
		logger:   testLogger,
		broker:   rdbClient,
		queues:   []string{"default", "critical"},
		interval: pollInterval,
	})
	t1 := h.NewTaskMessageWithQueue("gen_thumbnail", nil, "default")
	t2 := h.NewTaskMessageWithQueue("send_email", nil, "critical")
	t3 := h.NewTaskMessageWithQueue("reindex", nil, "default")
	t4 := h.NewTaskMessageWithQueue("sync", nil, "critical")
	now := time.Now()

	tests := []struct {
		initScheduled map[string][]base.Z            // scheduled queue initial state
		initRetry     map[string][]base.Z            // retry queue initial state
		initPending   map[string][]*base.TaskMessage // default queue initial state
		wait          time.Duration                  // wait duration before checking for final state
		wantScheduled map[string][]*base.TaskMessage // schedule queue final state
		wantRetry     map[string][]*base.TaskMessage // retry queue final state
		wantPending   map[string][]*base.TaskMessage // default queue final state
	}{
		{
			initScheduled: map[string][]base.Z{
				"default":  {{Message: t1, Score: now.Add(time.Hour).Unix()}},
				"critical": {{Message: t2, Score: now.Add(-2 * time.Second).Unix()}},
			},
			initRetry: map[string][]base.Z{
				"default":  {{Message: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()}},
				"critical": {},
			},
			initPending: map[string][]*base.TaskMessage{
				"default":  {},
				"critical": {t4},
			},
			wait: pollInterval * 2,
			wantScheduled: map[string][]*base.TaskMessage{
				"default":  {t1},
				"critical": {},
			},
			wantRetry: map[string][]*base.TaskMessage{
				"default":  {},
				"critical": {},
			},
			wantPending: map[string][]*base.TaskMessage{
				"default":  {t3},
				"critical": {t2, t4},
			},
		},
		{
			initScheduled: map[string][]base.Z{
				"default": {
					{Message: t1, Score: now.Unix()},
					{Message: t3, Score: now.Add(-500 * time.Millisecond).Unix()},
				},
				"critical": {
					{Message: t2, Score: now.Add(-2 * time.Second).Unix()},
				},
			},
			initRetry: map[string][]base.Z{
				"default":  {},
				"critical": {},
			},
			initPending: map[string][]*base.TaskMessage{
				"default":  {},
				"critical": {t4},
			},
			wait: pollInterval * 2,
			wantScheduled: map[string][]*base.TaskMessage{
				"default":  {},
				"critical": {},
			},
			wantRetry: map[string][]*base.TaskMessage{
				"default":  {},
				"critical": {},
			},
			wantPending: map[string][]*base.TaskMessage{
				"default":  {t1, t3},
				"critical": {t2, t4},
			},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r)                                  // clean up db before each test case.
		h.SeedAllScheduledQueues(t, r, tc.initScheduled) // initialize scheduled queue
		h.SeedAllRetryQueues(t, r, tc.initRetry)         // initialize retry queue
		h.SeedAllPendingQueues(t, r, tc.initPending)     // initialize default queue

		var wg sync.WaitGroup
		s.start(&wg)
		time.Sleep(tc.wait)
		s.terminate()

		for qname, want := range tc.wantScheduled {
			gotScheduled := h.GetScheduledMessages(t, r, qname)
			if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
				t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.ScheduledKey(qname), diff)
			}
		}

		for qname, want := range tc.wantRetry {
			gotRetry := h.GetRetryMessages(t, r, qname)
			if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" {
				t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.RetryKey(qname), diff)
			}
		}

		for qname, want := range tc.wantPending {
			gotPending := h.GetPendingMessages(t, r, qname)
			if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
				t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.QueueKey(qname), diff)
			}
		}
	}
}
```
`go.mod` (1 line added)

```diff
@@ -6,6 +6,7 @@ require (
 	github.com/go-redis/redis/v7 v7.4.0
 	github.com/google/go-cmp v0.4.0
 	github.com/google/uuid v1.1.1
+	github.com/robfig/cron/v3 v3.0.1
 	github.com/spf13/cast v1.3.1
 	go.uber.org/goleak v0.10.0
 	golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e
```
`go.sum` (2 lines added)

```diff
@@ -27,6 +27,8 @@ github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
```
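The new `robfig/cron/v3` dependency backs the `Scheduler` added in v0.13.0; its standard five-field specs and descriptors such as `@every 30s` are the strings passed to `Scheduler.Register` in the example above. A small sketch of parsing such a spec directly with the library, independent of asynq:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// ParseStandard understands the classic five-field cron format
	// as well as descriptors like "@every 30s".
	sched, err := cron.ParseStandard("*/5 * * * *")
	if err != nil {
		panic(err)
	}
	fmt.Println("next run:", sched.Next(time.Now())) // next 5-minute boundary after now
}
```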
`heartbeat.go` (35 lines changed)

The internal `workerStat` type is replaced by `workerInfo`, which also carries the task deadline; the `starting` channel now delivers `*workerInfo` values, and each heartbeat reports `ServerID` and `Deadline` for every active worker:

```diff
@@ -38,13 +38,13 @@ type heartbeater struct {
 	// heartbeater goroutine. In other words, confine these variables
 	// to this goroutine only.
 	started time.Time
-	workers map[string]workerStat
+	workers map[string]*workerInfo
 
 	// status is shared with other goroutine but is concurrency safe.
 	status *base.ServerStatus
 
 	// channels to receive updates on active workers.
-	starting <-chan *base.TaskMessage
+	starting <-chan *workerInfo
 	finished <-chan *base.TaskMessage
 }
 
@@ -56,7 +56,7 @@ type heartbeaterParams struct {
 	queues         map[string]int
 	strictPriority bool
 	status         *base.ServerStatus
-	starting       <-chan *base.TaskMessage
+	starting       <-chan *workerInfo
 	finished       <-chan *base.TaskMessage
 }
 
@@ -80,7 +80,7 @@ func newHeartbeater(params heartbeaterParams) *heartbeater {
 		strictPriority: params.strictPriority,
 
 		status:   params.status,
-		workers:  make(map[string]workerStat),
+		workers:  make(map[string]*workerInfo),
 		starting: params.starting,
 		finished: params.finished,
 	}
@@ -92,11 +92,14 @@ func (h *heartbeater) terminate() {
 	h.done <- struct{}{}
 }
 
-// A workerStat records the message a worker is working on
-// and the time the worker has started processing the message.
-type workerStat struct {
-	started time.Time
-	msg     *base.TaskMessage
+// A workerInfo holds an active worker information.
+type workerInfo struct {
+	// the task message the worker is processing.
+	msg *base.TaskMessage
+	// the time the worker has started processing the message.
+	started time.Time
+	// deadline the worker has to finish processing the task by.
+	deadline time.Time
 }
 
 func (h *heartbeater) start(wg *sync.WaitGroup) {
@@ -121,8 +124,8 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
 			h.beat()
 			timer.Reset(h.interval)
 
-		case msg := <-h.starting:
-			h.workers[msg.ID.String()] = workerStat{time.Now(), msg}
+		case w := <-h.starting:
+			h.workers[w.msg.ID.String()] = w
 
 		case msg := <-h.finished:
 			delete(h.workers, msg.ID.String())
@@ -145,15 +148,17 @@ func (h *heartbeater) beat() {
 	}
 
 	var ws []*base.WorkerInfo
-	for id, stat := range h.workers {
+	for id, w := range h.workers {
 		ws = append(ws, &base.WorkerInfo{
 			Host:     h.host,
 			PID:      h.pid,
+			ServerID: h.serverID,
 			ID:       id,
-			Type:     stat.msg.Type,
-			Queue:    stat.msg.Queue,
-			Payload:  stat.msg.Payload,
-			Started:  stat.started,
+			Type:     w.msg.Type,
+			Queue:    w.msg.Queue,
+			Payload:  w.msg.Payload,
+			Started:  w.started,
+			Deadline: w.deadline,
 		})
 	}
```

Accompanying heartbeater test changes:

```diff
@@ -47,7 +47,7 @@ func TestHeartbeater(t *testing.T) {
 		queues:         tc.queues,
 		strictPriority: false,
 		status:         status,
-		starting:       make(chan *base.TaskMessage),
+		starting:       make(chan *workerInfo),
 		finished:       make(chan *base.TaskMessage),
 	})
 
@@ -139,7 +139,7 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
 		queues:         map[string]int{"default": 1},
 		strictPriority: false,
 		status:         base.NewServerStatus(base.StatusRunning),
-		starting:       make(chan *base.TaskMessage),
+		starting:       make(chan *workerInfo),
 		finished:       make(chan *base.TaskMessage),
 	})
```
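The effect of this change is that the heartbeater's per-worker bookkeeping now tracks message, start time, and deadline together, keyed by task ID. A distilled sketch of that bookkeeping with standalone types (not the asynq internals):

```go
package main

import (
	"fmt"
	"time"
)

type taskMessage struct{ ID, Type, Queue string }

// workerInfo mirrors the shape introduced in the diff: message, start time, deadline.
type workerInfo struct {
	msg      *taskMessage
	started  time.Time
	deadline time.Time
}

func main() {
	workers := make(map[string]*workerInfo)

	// "starting": the processor hands over a fully populated workerInfo.
	w := &workerInfo{
		msg:      &taskMessage{ID: "abc123", Type: "email:send", Queue: "default"},
		started:  time.Now(),
		deadline: time.Now().Add(30 * time.Minute),
	}
	workers[w.msg.ID] = w

	// Each beat reports a snapshot that now includes the deadline.
	for id, w := range workers {
		fmt.Printf("worker %s: type=%s queue=%s deadline=%s\n",
			id, w.msg.Type, w.msg.Queue, w.deadline.Format(time.RFC3339))
	}

	// "finished": the entry is deleted by task ID.
	delete(workers, "abc123")
}
```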
606
inspector.go
606
inspector.go
@@ -1,606 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package asynq
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Inspector is a client interface to inspect and mutate the state of
|
|
||||||
// queues and tasks.
|
|
||||||
type Inspector struct {
|
|
||||||
rdb *rdb.RDB
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewInspector returns a new instance of Inspector.
|
|
||||||
func NewInspector(r RedisConnOpt) *Inspector {
|
|
||||||
return &Inspector{
|
|
||||||
rdb: rdb.NewRDB(createRedisClient(r)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the connection with redis.
|
|
||||||
func (i *Inspector) Close() error {
|
|
||||||
return i.rdb.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Queues returns a list of all queue names.
|
|
||||||
func (i *Inspector) Queues() ([]string, error) {
|
|
||||||
return i.rdb.AllQueues()
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueueStats represents a state of queues at a certain time.
|
|
||||||
type QueueStats struct {
|
|
||||||
// Name of the queue.
|
|
||||||
Queue string
|
|
||||||
// Size is the total number of tasks in the queue.
|
|
||||||
// The value is the sum of Pending, Active, Scheduled, Retry, and Dead.
|
|
||||||
Size int
|
|
||||||
// Number of pending tasks.
|
|
||||||
Pending int
|
|
||||||
// Number of active tasks.
|
|
||||||
Active int
|
|
||||||
// Number of scheduled tasks.
|
|
||||||
Scheduled int
|
|
||||||
// Number of retry tasks.
|
|
||||||
Retry int
|
|
||||||
// Number of dead tasks.
|
|
||||||
Dead int
|
|
||||||
// Total number of tasks being processed during the given date.
|
|
||||||
// The number includes both succeeded and failed tasks.
|
|
||||||
Processed int
|
|
||||||
// Total number of tasks failed to be processed during the given date.
|
|
||||||
Failed int
|
|
||||||
// Paused indicates whether the queue is paused.
|
|
||||||
// If true, tasks in the queue will not be processed.
|
|
||||||
Paused bool
|
|
||||||
// Time when this stats was taken.
|
|
||||||
Timestamp time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// CurrentStats returns a current stats of the given queue.
|
|
||||||
func (i *Inspector) CurrentStats(qname string) (*QueueStats, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
stats, err := i.rdb.CurrentStats(qname)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &QueueStats{
|
|
||||||
Queue: stats.Queue,
|
|
||||||
Size: stats.Size,
|
|
||||||
Pending: stats.Pending,
|
|
||||||
Active: stats.Active,
|
|
||||||
Scheduled: stats.Scheduled,
|
|
||||||
Retry: stats.Retry,
|
|
||||||
Dead: stats.Dead,
|
|
||||||
Processed: stats.Processed,
|
|
||||||
Failed: stats.Failed,
|
|
||||||
Paused: stats.Paused,
|
|
||||||
Timestamp: stats.Timestamp,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DailyStats holds aggregate data for a given day for a given queue.
|
|
||||||
type DailyStats struct {
|
|
||||||
// Name of the queue.
|
|
||||||
Queue string
|
|
||||||
// Total number of tasks being processed during the given date.
|
|
||||||
// The number includes both succeeded and failed tasks.
|
|
||||||
Processed int
|
|
||||||
// Total number of tasks failed to be processed during the given date.
|
|
||||||
Failed int
|
|
||||||
// Date this stats was taken.
|
|
||||||
Date time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// History returns a list of stats from the last n days.
|
|
||||||
func (i *Inspector) History(qname string, n int) ([]*DailyStats, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
stats, err := i.rdb.HistoricalStats(qname, n)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var res []*DailyStats
|
|
||||||
for _, s := range stats {
|
|
||||||
res = append(res, &DailyStats{
|
|
||||||
Queue: s.Queue,
|
|
||||||
Processed: s.Processed,
|
|
||||||
Failed: s.Failed,
|
|
||||||
Date: s.Time,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PendingTask is a task in a queue and is ready to be processed.
|
|
||||||
type PendingTask struct {
|
|
||||||
*Task
|
|
||||||
ID string
|
|
||||||
Queue string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ActiveTask is a task that's currently being processed.
|
|
||||||
type ActiveTask struct {
|
|
||||||
*Task
|
|
||||||
ID string
|
|
||||||
Queue string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScheduledTask is a task scheduled to be processed in the future.
|
|
||||||
type ScheduledTask struct {
|
|
||||||
*Task
|
|
||||||
ID string
|
|
||||||
Queue string
|
|
||||||
NextProcessAt time.Time
|
|
||||||
|
|
||||||
score int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// RetryTask is a task scheduled to be retried in the future.
|
|
||||||
type RetryTask struct {
|
|
||||||
*Task
|
|
||||||
ID string
|
|
||||||
Queue string
|
|
||||||
NextProcessAt time.Time
|
|
||||||
MaxRetry int
|
|
||||||
Retried int
|
|
||||||
ErrorMsg string
|
|
||||||
// TODO: LastFailedAt time.Time
|
|
||||||
|
|
||||||
score int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeadTask is a task that has exhausted its retries.
|
|
||||||
// DeadTask won't be retried automatically.
|
|
||||||
type DeadTask struct {
|
|
||||||
*Task
|
|
||||||
ID string
|
|
||||||
Queue string
|
|
||||||
MaxRetry int
|
|
||||||
Retried int
|
|
||||||
LastFailedAt time.Time
|
|
||||||
ErrorMsg string
|
|
||||||
|
|
||||||
score int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key returns a key used to delete, run, and kill the task.
|
|
||||||
func (t *ScheduledTask) Key() string {
|
|
||||||
return fmt.Sprintf("s:%v:%v", t.ID, t.score)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key returns a key used to delete, run, and kill the task.
|
|
||||||
func (t *RetryTask) Key() string {
|
|
||||||
return fmt.Sprintf("r:%v:%v", t.ID, t.score)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key returns a key used to delete, run, and kill the task.
|
|
||||||
func (t *DeadTask) Key() string {
|
|
||||||
return fmt.Sprintf("d:%v:%v", t.ID, t.score)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTaskKey parses a key string and returns each part of key with proper
|
|
||||||
// type if valid, otherwise it reports an error.
|
|
||||||
func parseTaskKey(key string) (id uuid.UUID, score int64, state string, err error) {
|
|
||||||
parts := strings.Split(key, ":")
|
|
||||||
if len(parts) != 3 {
|
|
||||||
return uuid.Nil, 0, "", fmt.Errorf("invalid id")
|
|
||||||
}
|
|
||||||
id, err = uuid.Parse(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return uuid.Nil, 0, "", fmt.Errorf("invalid id")
|
|
||||||
}
|
|
||||||
score, err = strconv.ParseInt(parts[2], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return uuid.Nil, 0, "", fmt.Errorf("invalid id")
|
|
||||||
}
|
|
||||||
state = parts[0]
|
|
||||||
if len(state) != 1 || !strings.Contains("srd", state) {
|
|
||||||
return uuid.Nil, 0, "", fmt.Errorf("invalid id")
|
|
||||||
}
|
|
||||||
return id, score, state, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListOption specifies behavior of list operation.
|
|
||||||
type ListOption interface{}
|
|
||||||
|
|
||||||
// Internal list option representations.
|
|
||||||
type (
|
|
||||||
pageSizeOpt int
|
|
||||||
pageNumOpt int
|
|
||||||
)
|
|
||||||
|
|
||||||
type listOption struct {
|
|
||||||
pageSize int
|
|
||||||
pageNum int
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Page size used by default in list operation.
|
|
||||||
defaultPageSize = 30
|
|
||||||
|
|
||||||
// Page number used by default in list operation.
|
|
||||||
defaultPageNum = 1
|
|
||||||
)
|
|
||||||
|
|
||||||
func composeListOptions(opts ...ListOption) listOption {
|
|
||||||
res := listOption{
|
|
||||||
pageSize: defaultPageSize,
|
|
||||||
pageNum: defaultPageNum,
|
|
||||||
}
|
|
||||||
for _, opt := range opts {
|
|
||||||
switch opt := opt.(type) {
|
|
||||||
case pageSizeOpt:
|
|
||||||
res.pageSize = int(opt)
|
|
||||||
case pageNumOpt:
|
|
||||||
res.pageNum = int(opt)
|
|
||||||
default:
|
|
||||||
// ignore unexpected option
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// PageSize returns an option to specify the page size for list operation.
|
|
||||||
//
|
|
||||||
// Negative page size is treated as zero.
|
|
||||||
func PageSize(n int) ListOption {
|
|
||||||
if n < 0 {
|
|
||||||
n = 0
|
|
||||||
}
|
|
||||||
return pageSizeOpt(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Page returns an option to specify the page number for list operation.
|
|
||||||
// The value 1 fetches the first page.
|
|
||||||
//
|
|
||||||
// Negative page number is treated as one.
|
|
||||||
func Page(n int) ListOption {
|
|
||||||
if n < 0 {
|
|
||||||
n = 1
|
|
||||||
}
|
|
||||||
return pageNumOpt(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListPendingTasks retrieves pending tasks from the specified queue.
|
|
||||||
//
|
|
||||||
// By default, it retrieves the first 30 tasks.
|
|
||||||
func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*PendingTask, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opt := composeListOptions(opts...)
|
|
||||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
|
||||||
msgs, err := i.rdb.ListPending(qname, pgn)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var tasks []*PendingTask
|
|
||||||
for _, m := range msgs {
|
|
||||||
tasks = append(tasks, &PendingTask{
|
|
||||||
Task: NewTask(m.Type, m.Payload),
|
|
||||||
ID: m.ID.String(),
|
|
||||||
Queue: m.Queue,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return tasks, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListActiveTasks retrieves active tasks from the specified queue.
|
|
||||||
//
|
|
||||||
// By default, it retrieves the first 30 tasks.
|
|
||||||
func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*ActiveTask, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opt := composeListOptions(opts...)
|
|
||||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
|
||||||
msgs, err := i.rdb.ListActive(qname, pgn)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var tasks []*ActiveTask
|
|
||||||
for _, m := range msgs {
|
|
||||||
tasks = append(tasks, &ActiveTask{
|
|
||||||
Task: NewTask(m.Type, m.Payload),
|
|
||||||
ID: m.ID.String(),
|
|
||||||
Queue: m.Queue,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return tasks, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListScheduledTasks retrieves scheduled tasks from the specified queue.
|
|
||||||
// Tasks are sorted by NextProcessAt field in ascending order.
|
|
||||||
//
|
|
||||||
// By default, it retrieves the first 30 tasks.
|
|
||||||
func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*ScheduledTask, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opt := composeListOptions(opts...)
|
|
||||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
|
||||||
zs, err := i.rdb.ListScheduled(qname, pgn)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var tasks []*ScheduledTask
|
|
||||||
for _, z := range zs {
|
|
||||||
processAt := time.Unix(z.Score, 0)
|
|
||||||
t := NewTask(z.Message.Type, z.Message.Payload)
|
|
||||||
tasks = append(tasks, &ScheduledTask{
|
|
||||||
Task: t,
|
|
||||||
ID: z.Message.ID.String(),
|
|
||||||
Queue: z.Message.Queue,
|
|
||||||
NextProcessAt: processAt,
|
|
||||||
score: z.Score,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return tasks, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListRetryTasks retrieves retry tasks from the specified queue.
|
|
||||||
// Tasks are sorted by NextProcessAt field in ascending order.
|
|
||||||
//
|
|
||||||
// By default, it retrieves the first 30 tasks.
|
|
||||||
func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*RetryTask, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opt := composeListOptions(opts...)
|
|
||||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
|
||||||
zs, err := i.rdb.ListRetry(qname, pgn)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var tasks []*RetryTask
|
|
||||||
for _, z := range zs {
|
|
||||||
processAt := time.Unix(z.Score, 0)
|
|
||||||
t := NewTask(z.Message.Type, z.Message.Payload)
|
|
||||||
tasks = append(tasks, &RetryTask{
|
|
||||||
Task: t,
|
|
||||||
ID: z.Message.ID.String(),
|
|
||||||
Queue: z.Message.Queue,
|
|
||||||
NextProcessAt: processAt,
|
|
||||||
MaxRetry: z.Message.Retry,
|
|
||||||
Retried: z.Message.Retried,
|
|
||||||
// TODO: LastFailedAt: z.Message.LastFailedAt
|
|
||||||
ErrorMsg: z.Message.ErrorMsg,
|
|
||||||
score: z.Score,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return tasks, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListDeadTasks retrieves dead tasks from the specified queue.
|
|
||||||
// Tasks are sorted by LastFailedAt field in descending order.
|
|
||||||
//
|
|
||||||
// By default, it retrieves the first 30 tasks.
|
|
||||||
func (i *Inspector) ListDeadTasks(qname string, opts ...ListOption) ([]*DeadTask, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opt := composeListOptions(opts...)
|
|
||||||
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
|
||||||
zs, err := i.rdb.ListDead(qname, pgn)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var tasks []*DeadTask
|
|
||||||
for _, z := range zs {
|
|
||||||
failedAt := time.Unix(z.Score, 0)
|
|
||||||
t := NewTask(z.Message.Type, z.Message.Payload)
|
|
||||||
tasks = append(tasks, &DeadTask{
|
|
||||||
Task: t,
|
|
||||||
ID: z.Message.ID.String(),
|
|
||||||
Queue: z.Message.Queue,
|
|
||||||
MaxRetry: z.Message.Retry,
|
|
||||||
Retried: z.Message.Retried,
|
|
||||||
LastFailedAt: failedAt,
|
|
||||||
ErrorMsg: z.Message.ErrorMsg,
|
|
||||||
score: z.Score,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return tasks, nil
}
|
|
||||||
|
|
||||||
// DeleteAllScheduledTasks deletes all scheduled tasks from the specified queue,
|
|
||||||
// and reports the number of tasks deleted.
|
|
||||||
func (i *Inspector) DeleteAllScheduledTasks(qname string) (int, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := i.rdb.DeleteAllScheduledTasks(qname)
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteAllRetryTasks deletes all retry tasks from the specified queue,
|
|
||||||
// and reports the number of tasks deleted.
|
|
||||||
func (i *Inspector) DeleteAllRetryTasks(qname string) (int, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := i.rdb.DeleteAllRetryTasks(qname)
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteAllDeadTasks deletes all dead tasks from the specified queue,
|
|
||||||
// and reports the number of tasks deleted.
|
|
||||||
func (i *Inspector) DeleteAllDeadTasks(qname string) (int, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := i.rdb.DeleteAllDeadTasks(qname)
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteTaskByKey deletes a task with the given key from the given queue.
|
|
||||||
func (i *Inspector) DeleteTaskByKey(qname, key string) error {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
id, score, state, err := parseTaskKey(key)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch state {
|
|
||||||
case "s":
|
|
||||||
return i.rdb.DeleteScheduledTask(qname, id, score)
|
|
||||||
case "r":
|
|
||||||
return i.rdb.DeleteRetryTask(qname, id, score)
|
|
||||||
case "d":
|
|
||||||
return i.rdb.DeleteDeadTask(qname, id, score)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunAllScheduledTasks transitions all scheduled tasks to pending state within the given queue,
|
|
||||||
// and reports the number of tasks transitioned.
|
|
||||||
func (i *Inspector) RunAllScheduledTasks(qname string) (int, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := i.rdb.RunAllScheduledTasks(qname)
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunAllRetryTasks transitions all retry tasks to pending state within the given queue,
|
|
||||||
// and reports the number of tasks transitioned.
|
|
||||||
func (i *Inspector) RunAllRetryTasks(qname string) (int, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := i.rdb.RunAllRetryTasks(qname)
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunAllDeadTasks transitions all dead tasks to pending state within the given queue,
|
|
||||||
// and reports the number of tasks transitioned.
|
|
||||||
func (i *Inspector) RunAllDeadTasks(qname string) (int, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := i.rdb.RunAllDeadTasks(qname)
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunTaskByKey transitions a task to pending state given task key and queue name.
|
|
||||||
func (i *Inspector) RunTaskByKey(qname, key string) error {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
id, score, state, err := parseTaskKey(key)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch state {
|
|
||||||
case "s":
|
|
||||||
return i.rdb.RunScheduledTask(qname, id, score)
|
|
||||||
case "r":
|
|
||||||
return i.rdb.RunRetryTask(qname, id, score)
|
|
||||||
case "d":
|
|
||||||
return i.rdb.RunDeadTask(qname, id, score)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// KillAllScheduledTasks kills all scheduled tasks within the given queue,
|
|
||||||
// and reports the number of tasks killed.
|
|
||||||
func (i *Inspector) KillAllScheduledTasks(qname string) (int, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := i.rdb.KillAllScheduledTasks(qname)
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// KillAllRetryTasks kills all retry tasks within the given queue,
|
|
||||||
// and reports the number of tasks killed.
|
|
||||||
func (i *Inspector) KillAllRetryTasks(qname string) (int, error) {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := i.rdb.KillAllRetryTasks(qname)
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// KillTaskByKey kills a task with the given key in the given queue.
|
|
||||||
func (i *Inspector) KillTaskByKey(qname, key string) error {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
id, score, state, err := parseTaskKey(key)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch state {
|
|
||||||
case "s":
|
|
||||||
return i.rdb.KillScheduledTask(qname, id, score)
|
|
||||||
case "r":
|
|
||||||
return i.rdb.KillRetryTask(qname, id, score)
|
|
||||||
case "d":
|
|
||||||
return fmt.Errorf("task already dead")
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PauseQueue pauses task processing on the specified queue.
|
|
||||||
// If the queue is already paused, it will return a non-nil error.
|
|
||||||
func (i *Inspector) PauseQueue(qname string) error {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return i.rdb.Pause(qname)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnpauseQueue resumes task processing on the specified queue.
|
|
||||||
// If the queue is not paused, it will return a non-nil error.
|
|
||||||
func (i *Inspector) UnpauseQueue(qname string) error {
|
|
||||||
if err := validateQueueName(qname); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return i.rdb.Unpause(qname)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
|
|
||||||
func (i *Inspector) ClusterKeySlot(qname string) (int64, error) {
|
|
||||||
return i.rdb.ClusterKeySlot(qname)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClusterNode describes a node in redis cluster.
|
|
||||||
type ClusterNode struct {
|
|
||||||
// Node ID in the cluster.
|
|
||||||
ID string
|
|
||||||
|
|
||||||
// Address of the node.
|
|
||||||
Addr string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClusterNodes returns a list of nodes the given queue belongs to.
|
|
||||||
func (i *Inspector) ClusterNodes(qname string) ([]ClusterNode, error) {
|
|
||||||
nodes, err := i.rdb.ClusterNodes(qname)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var res []ClusterNode
|
|
||||||
for _, node := range nodes {
|
|
||||||
res = append(res, ClusterNode{ID: node.ID, Addr: node.Addr})
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
inspector_test.go (1932 lines deleted; file diff suppressed because it is too large)

inspeq/doc.go (new file, 22 lines)
@@ -0,0 +1,22 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

/*
Package inspeq provides helper types and functions to inspect queues and tasks managed by Asynq.

Inspector is used to query and mutate the state of queues and tasks.

Example:

    inspector := inspeq.New(asynq.RedisClientOpt{Addr: "localhost:6379"})

    tasks, err := inspector.ListArchivedTasks("my-queue")

    for _, t := range tasks {
        if err := inspector.DeleteTaskByKey("my-queue", t.Key()); err != nil {
            // handle error
        }
    }
*/
package inspeq
inspeq/inspector.go (new file, 956 lines)
@@ -0,0 +1,956 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package inspeq
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-redis/redis/v7"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/hibiken/asynq"
|
||||||
|
"github.com/hibiken/asynq/internal/base"
|
||||||
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Inspector is a client interface to inspect and mutate the state of
|
||||||
|
// queues and tasks.
|
||||||
|
type Inspector struct {
|
||||||
|
rdb *rdb.RDB
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new instance of Inspector.
|
||||||
|
func New(r asynq.RedisConnOpt) *Inspector {
|
||||||
|
c, ok := r.MakeRedisClient().(redis.UniversalClient)
|
||||||
|
if !ok {
|
||||||
|
panic(fmt.Sprintf("inspeq: unsupported RedisConnOpt type %T", r))
|
||||||
|
}
|
||||||
|
return &Inspector{
|
||||||
|
rdb: rdb.NewRDB(c),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the connection with redis.
|
||||||
|
func (i *Inspector) Close() error {
|
||||||
|
return i.rdb.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Queues returns a list of all queue names.
|
||||||
|
func (i *Inspector) Queues() ([]string, error) {
|
||||||
|
return i.rdb.AllQueues()
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueueStats represents a state of queues at a certain time.
|
||||||
|
type QueueStats struct {
|
||||||
|
// Name of the queue.
|
||||||
|
Queue string
|
||||||
|
// Total number of bytes that the queue and its tasks require to be stored in redis.
|
||||||
|
MemoryUsage int64
|
||||||
|
// Size is the total number of tasks in the queue.
|
||||||
|
// The value is the sum of Pending, Active, Scheduled, Retry, and Archived.
|
||||||
|
Size int
|
||||||
|
// Number of pending tasks.
|
||||||
|
Pending int
|
||||||
|
// Number of active tasks.
|
||||||
|
Active int
|
||||||
|
// Number of scheduled tasks.
|
||||||
|
Scheduled int
|
||||||
|
// Number of retry tasks.
|
||||||
|
Retry int
|
||||||
|
// Number of archived tasks.
|
||||||
|
Archived int
|
||||||
|
// Total number of tasks being processed during the given date.
|
||||||
|
// The number includes both succeeded and failed tasks.
|
||||||
|
Processed int
|
||||||
|
// Total number of tasks failed to be processed during the given date.
|
||||||
|
Failed int
|
||||||
|
// Paused indicates whether the queue is paused.
|
||||||
|
// If true, tasks in the queue will not be processed.
|
||||||
|
Paused bool
|
||||||
|
// Time when this stats was taken.
|
||||||
|
Timestamp time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentStats returns a current stats of the given queue.
|
||||||
|
func (i *Inspector) CurrentStats(qname string) (*QueueStats, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
stats, err := i.rdb.CurrentStats(qname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &QueueStats{
|
||||||
|
Queue: stats.Queue,
|
||||||
|
MemoryUsage: stats.MemoryUsage,
|
||||||
|
Size: stats.Size,
|
||||||
|
Pending: stats.Pending,
|
||||||
|
Active: stats.Active,
|
||||||
|
Scheduled: stats.Scheduled,
|
||||||
|
Retry: stats.Retry,
|
||||||
|
Archived: stats.Archived,
|
||||||
|
Processed: stats.Processed,
|
||||||
|
Failed: stats.Failed,
|
||||||
|
Paused: stats.Paused,
|
||||||
|
Timestamp: stats.Timestamp,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
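A short, self-contained usage sketch for the new package: construct an Inspector and print the current stats of a queue. The queue name and Redis address are placeholders.

    package main

    import (
        "fmt"
        "log"

        "github.com/hibiken/asynq"
        "github.com/hibiken/asynq/inspeq"
    )

    func main() {
        inspector := inspeq.New(asynq.RedisClientOpt{Addr: "localhost:6379"})
        defer inspector.Close()

        stats, err := inspector.CurrentStats("default")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("queue=%s size=%d pending=%d active=%d paused=%t\n",
            stats.Queue, stats.Size, stats.Pending, stats.Active, stats.Paused)
    }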
|
|
||||||
|
// DailyStats holds aggregate data for a given day for a given queue.
|
||||||
|
type DailyStats struct {
|
||||||
|
// Name of the queue.
|
||||||
|
Queue string
|
||||||
|
// Total number of tasks being processed during the given date.
|
||||||
|
// The number includes both succeeded and failed tasks.
|
||||||
|
Processed int
|
||||||
|
// Total number of tasks failed to be processed during the given date.
|
||||||
|
Failed int
|
||||||
|
// Date this stats was taken.
|
||||||
|
Date time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// History returns a list of stats from the last n days.
|
||||||
|
func (i *Inspector) History(qname string, n int) ([]*DailyStats, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
stats, err := i.rdb.HistoricalStats(qname, n)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var res []*DailyStats
|
||||||
|
for _, s := range stats {
|
||||||
|
res = append(res, &DailyStats{
|
||||||
|
Queue: s.Queue,
|
||||||
|
Processed: s.Processed,
|
||||||
|
Failed: s.Failed,
|
||||||
|
Date: s.Time,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
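History lends itself to computing a daily failure rate; a sketch assuming an *inspeq.Inspector named inspector and the usual fmt/log imports:

    stats, err := inspector.History("default", 7) // last 7 days
    if err != nil {
        log.Fatal(err)
    }
    for _, s := range stats {
        rate := 0.0
        if s.Processed > 0 {
            rate = float64(s.Failed) / float64(s.Processed) * 100
        }
        fmt.Printf("%s processed=%d failed=%d (%.1f%% failed)\n",
            s.Date.Format("2006-01-02"), s.Processed, s.Failed, rate)
    }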
|
|
||||||
|
// ErrQueueNotFound indicates that the specified queue does not exist.
|
||||||
|
type ErrQueueNotFound struct {
|
||||||
|
qname string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ErrQueueNotFound) Error() string {
|
||||||
|
return fmt.Sprintf("queue %q does not exist", e.qname)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrQueueNotEmpty indicates that the specified queue is not empty.
|
||||||
|
type ErrQueueNotEmpty struct {
|
||||||
|
qname string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ErrQueueNotEmpty) Error() string {
|
||||||
|
return fmt.Sprintf("queue %q is not empty", e.qname)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteQueue removes the specified queue.
|
||||||
|
//
|
||||||
|
// If force is set to true, DeleteQueue will remove the queue regardless of
|
||||||
|
// the queue size as long as no tasks are active in the queue.
|
||||||
|
// If force is set to false, DeleteQueue will remove the queue only if
|
||||||
|
// the queue is empty.
|
||||||
|
//
|
||||||
|
// If the specified queue does not exist, DeleteQueue returns ErrQueueNotFound.
|
||||||
|
// If force is set to false and the specified queue is not empty, DeleteQueue
|
||||||
|
// returns ErrQueueNotEmpty.
|
||||||
|
func (i *Inspector) DeleteQueue(qname string, force bool) error {
|
||||||
|
err := i.rdb.RemoveQueue(qname, force)
|
||||||
|
if _, ok := err.(*rdb.ErrQueueNotFound); ok {
|
||||||
|
return &ErrQueueNotFound{qname}
|
||||||
|
}
|
||||||
|
if _, ok := err.(*rdb.ErrQueueNotEmpty); ok {
|
||||||
|
return &ErrQueueNotEmpty{qname}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
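Callers can branch on the two exported error types with a type switch; a sketch assuming an already-constructed inspector:

    if err := inspector.DeleteQueue("low", false); err != nil {
        switch err.(type) {
        case *inspeq.ErrQueueNotFound:
            fmt.Println("queue does not exist")
        case *inspeq.ErrQueueNotEmpty:
            fmt.Println("queue is not empty; pass force=true to delete it anyway")
        default:
            log.Fatal(err)
        }
    }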
|
|
||||||
|
// PendingTask is a task in a queue and is ready to be processed.
|
||||||
|
type PendingTask struct {
|
||||||
|
*asynq.Task
|
||||||
|
ID string
|
||||||
|
Queue string
|
||||||
|
MaxRetry int
|
||||||
|
Retried int
|
||||||
|
LastError string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ActiveTask is a task that's currently being processed.
|
||||||
|
type ActiveTask struct {
|
||||||
|
*asynq.Task
|
||||||
|
ID string
|
||||||
|
Queue string
|
||||||
|
MaxRetry int
|
||||||
|
Retried int
|
||||||
|
LastError string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScheduledTask is a task scheduled to be processed in the future.
|
||||||
|
type ScheduledTask struct {
|
||||||
|
*asynq.Task
|
||||||
|
ID string
|
||||||
|
Queue string
|
||||||
|
MaxRetry int
|
||||||
|
Retried int
|
||||||
|
LastError string
|
||||||
|
NextProcessAt time.Time
|
||||||
|
|
||||||
|
score int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryTask is a task scheduled to be retried in the future.
|
||||||
|
type RetryTask struct {
|
||||||
|
*asynq.Task
|
||||||
|
ID string
|
||||||
|
Queue string
|
||||||
|
NextProcessAt time.Time
|
||||||
|
MaxRetry int
|
||||||
|
Retried int
|
||||||
|
LastError string
|
||||||
|
// TODO: LastFailedAt time.Time
|
||||||
|
|
||||||
|
score int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArchivedTask is a task archived for debugging and inspection purposes, and
|
||||||
|
// it won't be retried automatically.
|
||||||
|
// A task can be archived when the task exhausts its retry counts or manually
|
||||||
|
// archived by a user via the CLI or Inspector.
|
||||||
|
type ArchivedTask struct {
|
||||||
|
*asynq.Task
|
||||||
|
ID string
|
||||||
|
Queue string
|
||||||
|
MaxRetry int
|
||||||
|
Retried int
|
||||||
|
LastFailedAt time.Time
|
||||||
|
LastError string
|
||||||
|
|
||||||
|
score int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format string used for task key.
|
||||||
|
// Format is <prefix>:<uuid>:<score>.
|
||||||
|
const taskKeyFormat = "%s:%v:%v"
|
||||||
|
|
||||||
|
// Prefix used for task key.
|
||||||
|
const (
|
||||||
|
keyPrefixPending = "p"
|
||||||
|
keyPrefixScheduled = "s"
|
||||||
|
keyPrefixRetry = "r"
|
||||||
|
keyPrefixArchived = "a"
|
||||||
|
|
||||||
|
allKeyPrefixes = keyPrefixPending + keyPrefixScheduled + keyPrefixRetry + keyPrefixArchived
|
||||||
|
)
|
||||||
|
|
||||||
|
// Key returns a key used to delete, and archive the pending task.
|
||||||
|
func (t *PendingTask) Key() string {
|
||||||
|
// Note: Pending tasks are stored in redis LIST, therefore no score.
|
||||||
|
// Use zero for the score to use the same key format.
|
||||||
|
return fmt.Sprintf(taskKeyFormat, keyPrefixPending, t.ID, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key returns a key used to delete, run, and archive the scheduled task.
|
||||||
|
func (t *ScheduledTask) Key() string {
|
||||||
|
return fmt.Sprintf(taskKeyFormat, keyPrefixScheduled, t.ID, t.score)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key returns a key used to delete, run, and archive the retry task.
|
||||||
|
func (t *RetryTask) Key() string {
|
||||||
|
return fmt.Sprintf(taskKeyFormat, keyPrefixRetry, t.ID, t.score)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key returns a key used to delete and run the archived task.
|
||||||
|
func (t *ArchivedTask) Key() string {
|
||||||
|
return fmt.Sprintf(taskKeyFormat, keyPrefixArchived, t.ID, t.score)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseTaskKey parses a key string and returns each part of key with proper
|
||||||
|
// type if valid, otherwise it reports an error.
|
||||||
|
func parseTaskKey(key string) (prefix string, id uuid.UUID, score int64, err error) {
|
||||||
|
parts := strings.Split(key, ":")
|
||||||
|
if len(parts) != 3 {
|
||||||
|
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
|
||||||
|
}
|
||||||
|
id, err = uuid.Parse(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
|
||||||
|
}
|
||||||
|
score, err = strconv.ParseInt(parts[2], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
|
||||||
|
}
|
||||||
|
prefix = parts[0]
|
||||||
|
if len(prefix) != 1 || !strings.Contains(allKeyPrefixes, prefix) {
|
||||||
|
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
|
||||||
|
}
|
||||||
|
return prefix, id, score, nil
|
||||||
|
}
|
||||||
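To make the key format concrete: Key() yields strings such as "s:<uuid>:<score>", and those opaque keys are what DeleteTaskByKey, RunTaskByKey, and ArchiveTaskByKey expect. A sketch assuming an inspector; the printed UUID is made up:

    tasks, err := inspector.ListScheduledTasks("default")
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range tasks {
        // e.g. "s:6ba7b810-9dad-11d1-80b4-00c04fd430c8:1609459200" (illustrative value)
        fmt.Println(t.Key())
    }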
|
|
||||||
|
// ListOption specifies behavior of list operation.
|
||||||
|
type ListOption interface{}
|
||||||
|
|
||||||
|
// Internal list option representations.
|
||||||
|
type (
|
||||||
|
pageSizeOpt int
|
||||||
|
pageNumOpt int
|
||||||
|
)
|
||||||
|
|
||||||
|
type listOption struct {
|
||||||
|
pageSize int
|
||||||
|
pageNum int
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Page size used by default in list operation.
|
||||||
|
defaultPageSize = 30
|
||||||
|
|
||||||
|
// Page number used by default in list operation.
|
||||||
|
defaultPageNum = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
func composeListOptions(opts ...ListOption) listOption {
|
||||||
|
res := listOption{
|
||||||
|
pageSize: defaultPageSize,
|
||||||
|
pageNum: defaultPageNum,
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
switch opt := opt.(type) {
|
||||||
|
case pageSizeOpt:
|
||||||
|
res.pageSize = int(opt)
|
||||||
|
case pageNumOpt:
|
||||||
|
res.pageNum = int(opt)
|
||||||
|
default:
|
||||||
|
// ignore unexpected option
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// PageSize returns an option to specify the page size for list operation.
|
||||||
|
//
|
||||||
|
// Negative page size is treated as zero.
|
||||||
|
func PageSize(n int) ListOption {
|
||||||
|
if n < 0 {
|
||||||
|
n = 0
|
||||||
|
}
|
||||||
|
return pageSizeOpt(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Page returns an option to specify the page number for list operation.
|
||||||
|
// The value 1 fetches the first page.
|
||||||
|
//
|
||||||
|
// Negative page number is treated as one.
|
||||||
|
func Page(n int) ListOption {
|
||||||
|
if n < 0 {
|
||||||
|
n = 1
|
||||||
|
}
|
||||||
|
return pageNumOpt(n)
|
||||||
|
}
|
||||||
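The options compose with any of the List* methods; for example, fetching the second page of 50 pending tasks (sketch, inspector assumed):

    tasks, err := inspector.ListPendingTasks("default", inspeq.PageSize(50), inspeq.Page(2))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("page 2: %d tasks\n", len(tasks))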
|
|
||||||
|
// ListPendingTasks retrieves pending tasks from the specified queue.
|
||||||
|
//
|
||||||
|
// By default, it retrieves the first 30 tasks.
|
||||||
|
func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*PendingTask, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
opt := composeListOptions(opts...)
|
||||||
|
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||||
|
msgs, err := i.rdb.ListPending(qname, pgn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var tasks []*PendingTask
|
||||||
|
for _, m := range msgs {
|
||||||
|
tasks = append(tasks, &PendingTask{
|
||||||
|
Task: asynq.NewTask(m.Type, m.Payload),
|
||||||
|
ID: m.ID.String(),
|
||||||
|
Queue: m.Queue,
|
||||||
|
MaxRetry: m.Retry,
|
||||||
|
Retried: m.Retried,
|
||||||
|
LastError: m.ErrorMsg,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return tasks, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListActiveTasks retrieves active tasks from the specified queue.
|
||||||
|
//
|
||||||
|
// By default, it retrieves the first 30 tasks.
|
||||||
|
func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*ActiveTask, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
opt := composeListOptions(opts...)
|
||||||
|
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||||
|
msgs, err := i.rdb.ListActive(qname, pgn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var tasks []*ActiveTask
|
||||||
|
for _, m := range msgs {
|
||||||
|
|
||||||
|
tasks = append(tasks, &ActiveTask{
|
||||||
|
Task: asynq.NewTask(m.Type, m.Payload),
|
||||||
|
ID: m.ID.String(),
|
||||||
|
Queue: m.Queue,
|
||||||
|
MaxRetry: m.Retry,
|
||||||
|
Retried: m.Retried,
|
||||||
|
LastError: m.ErrorMsg,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return tasks, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListScheduledTasks retrieves scheduled tasks from the specified queue.
|
||||||
|
// Tasks are sorted by NextProcessAt field in ascending order.
|
||||||
|
//
|
||||||
|
// By default, it retrieves the first 30 tasks.
|
||||||
|
func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*ScheduledTask, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
opt := composeListOptions(opts...)
|
||||||
|
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||||
|
zs, err := i.rdb.ListScheduled(qname, pgn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var tasks []*ScheduledTask
|
||||||
|
for _, z := range zs {
|
||||||
|
processAt := time.Unix(z.Score, 0)
|
||||||
|
t := asynq.NewTask(z.Message.Type, z.Message.Payload)
|
||||||
|
tasks = append(tasks, &ScheduledTask{
|
||||||
|
Task: t,
|
||||||
|
ID: z.Message.ID.String(),
|
||||||
|
Queue: z.Message.Queue,
|
||||||
|
MaxRetry: z.Message.Retry,
|
||||||
|
Retried: z.Message.Retried,
|
||||||
|
LastError: z.Message.ErrorMsg,
|
||||||
|
NextProcessAt: processAt,
|
||||||
|
score: z.Score,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return tasks, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListRetryTasks retrieves retry tasks from the specified queue.
|
||||||
|
// Tasks are sorted by NextProcessAt field in ascending order.
|
||||||
|
//
|
||||||
|
// By default, it retrieves the first 30 tasks.
|
||||||
|
func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*RetryTask, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
opt := composeListOptions(opts...)
|
||||||
|
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||||
|
zs, err := i.rdb.ListRetry(qname, pgn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var tasks []*RetryTask
|
||||||
|
for _, z := range zs {
|
||||||
|
processAt := time.Unix(z.Score, 0)
|
||||||
|
t := asynq.NewTask(z.Message.Type, z.Message.Payload)
|
||||||
|
tasks = append(tasks, &RetryTask{
|
||||||
|
Task: t,
|
||||||
|
ID: z.Message.ID.String(),
|
||||||
|
Queue: z.Message.Queue,
|
||||||
|
NextProcessAt: processAt,
|
||||||
|
MaxRetry: z.Message.Retry,
|
||||||
|
Retried: z.Message.Retried,
|
||||||
|
// TODO: LastFailedAt: z.Message.LastFailedAt
|
||||||
|
LastError: z.Message.ErrorMsg,
|
||||||
|
score: z.Score,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return tasks, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListArchivedTasks retrieves archived tasks from the specified queue.
|
||||||
|
// Tasks are sorted by LastFailedAt field in descending order.
|
||||||
|
//
|
||||||
|
// By default, it retrieves the first 30 tasks.
|
||||||
|
func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*ArchivedTask, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
opt := composeListOptions(opts...)
|
||||||
|
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||||
|
zs, err := i.rdb.ListArchived(qname, pgn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var tasks []*ArchivedTask
|
||||||
|
for _, z := range zs {
|
||||||
|
failedAt := time.Unix(z.Score, 0)
|
||||||
|
t := asynq.NewTask(z.Message.Type, z.Message.Payload)
|
||||||
|
tasks = append(tasks, &ArchivedTask{
|
||||||
|
Task: t,
|
||||||
|
ID: z.Message.ID.String(),
|
||||||
|
Queue: z.Message.Queue,
|
||||||
|
MaxRetry: z.Message.Retry,
|
||||||
|
Retried: z.Message.Retried,
|
||||||
|
LastFailedAt: failedAt,
|
||||||
|
LastError: z.Message.ErrorMsg,
|
||||||
|
score: z.Score,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return tasks, nil
|
||||||
|
}
|
||||||
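Archived tasks keep their last error and failure time, which makes selective re-runs straightforward; a sketch that retries tasks whose last error mentions a timeout (the substring check is illustrative; assumes an inspector plus the strings package):

    tasks, err := inspector.ListArchivedTasks("default", inspeq.PageSize(100))
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range tasks {
        fmt.Printf("%s last failed at %s: %s\n", t.ID, t.LastFailedAt, t.LastError)
        if strings.Contains(t.LastError, "timeout") {
            if err := inspector.RunTaskByKey("default", t.Key()); err != nil {
                log.Printf("run %s: %v", t.ID, err)
            }
        }
    }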
|
|
||||||
|
// DeleteAllPendingTasks deletes all pending tasks from the specified queue,
|
||||||
|
// and reports the number of tasks deleted.
|
||||||
|
func (i *Inspector) DeleteAllPendingTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.DeleteAllPendingTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteAllScheduledTasks deletes all scheduled tasks from the specified queue,
|
||||||
|
// and reports the number of tasks deleted.
|
||||||
|
func (i *Inspector) DeleteAllScheduledTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.DeleteAllScheduledTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteAllRetryTasks deletes all retry tasks from the specified queue,
|
||||||
|
// and reports the number of tasks deleted.
|
||||||
|
func (i *Inspector) DeleteAllRetryTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.DeleteAllRetryTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteAllArchivedTasks deletes all archived tasks from the specified queue,
|
||||||
|
// and reports the number of tasks deleted.
|
||||||
|
func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.DeleteAllArchivedTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteTaskByKey deletes a task with the given key from the given queue.
|
||||||
|
func (i *Inspector) DeleteTaskByKey(qname, key string) error {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
prefix, id, score, err := parseTaskKey(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch prefix {
|
||||||
|
case keyPrefixPending:
|
||||||
|
return i.rdb.DeletePendingTask(qname, id)
|
||||||
|
case keyPrefixScheduled:
|
||||||
|
return i.rdb.DeleteScheduledTask(qname, id, score)
|
||||||
|
case keyPrefixRetry:
|
||||||
|
return i.rdb.DeleteRetryTask(qname, id, score)
|
||||||
|
case keyPrefixArchived:
|
||||||
|
return i.rdb.DeleteArchivedTask(qname, id, score)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunAllScheduledTasks transitions all scheduled tasks to pending state from the given queue,
|
||||||
|
// and reports the number of tasks transitioned.
|
||||||
|
func (i *Inspector) RunAllScheduledTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.RunAllScheduledTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunAllRetryTasks transitions all retry tasks to pending state from the given queue,
|
||||||
|
// and reports the number of tasks transitioned.
|
||||||
|
func (i *Inspector) RunAllRetryTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.RunAllRetryTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunAllArchivedTasks transitions all archived tasks to pending state from the given queue,
|
||||||
|
// and reports the number of tasks transitioned.
|
||||||
|
func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.RunAllArchivedTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunTaskByKey transitions a task to pending state given task key and queue name.
|
||||||
|
func (i *Inspector) RunTaskByKey(qname, key string) error {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
prefix, id, score, err := parseTaskKey(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch prefix {
|
||||||
|
case keyPrefixScheduled:
|
||||||
|
return i.rdb.RunScheduledTask(qname, id, score)
|
||||||
|
case keyPrefixRetry:
|
||||||
|
return i.rdb.RunRetryTask(qname, id, score)
|
||||||
|
case keyPrefixArchived:
|
||||||
|
return i.rdb.RunArchivedTask(qname, id, score)
|
||||||
|
case keyPrefixPending:
|
||||||
|
return fmt.Errorf("task is already pending for run")
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArchiveAllPendingTasks archives all pending tasks from the given queue,
|
||||||
|
// and reports the number of tasks archived.
|
||||||
|
func (i *Inspector) ArchiveAllPendingTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.ArchiveAllPendingTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArchiveAllScheduledTasks archives all scheduled tasks from the given queue,
|
||||||
|
// and reports the number of tasks archived.
|
||||||
|
func (i *Inspector) ArchiveAllScheduledTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.ArchiveAllScheduledTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArchiveAllRetryTasks archives all retry tasks from the given queue,
|
||||||
|
// and reports the number of tasks archived.
|
||||||
|
func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.ArchiveAllRetryTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArchiveTaskByKey archives a task with the given key in the given queue.
|
||||||
|
func (i *Inspector) ArchiveTaskByKey(qname, key string) error {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
prefix, id, score, err := parseTaskKey(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch prefix {
|
||||||
|
case keyPrefixPending:
|
||||||
|
return i.rdb.ArchivePendingTask(qname, id)
|
||||||
|
case keyPrefixScheduled:
|
||||||
|
return i.rdb.ArchiveScheduledTask(qname, id, score)
|
||||||
|
case keyPrefixRetry:
|
||||||
|
return i.rdb.ArchiveRetryTask(qname, id, score)
|
||||||
|
case keyPrefixArchived:
|
||||||
|
return fmt.Errorf("task is already archived")
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CancelActiveTask sends a signal to cancel processing of the task with
|
||||||
|
// the given id. CancelActiveTask is best-effort, which means that it does not
|
||||||
|
// guarantee that the task with the given id will be canceled. The return
|
||||||
|
// value only indicates whether the cancelation signal has been sent.
|
||||||
|
func (i *Inspector) CancelActiveTask(id string) error {
|
||||||
|
return i.rdb.PublishCancelation(id)
|
||||||
|
}
|
||||||
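Since cancelation is keyed by task ID, a common pattern is to pair it with ListActiveTasks; a best-effort sketch (inspector assumed):

    active, err := inspector.ListActiveTasks("critical")
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range active {
        if err := inspector.CancelActiveTask(t.ID); err != nil {
            log.Printf("could not send cancelation for %s: %v", t.ID, err)
        }
    }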
|
|
||||||
|
// PauseQueue pauses task processing on the specified queue.
|
||||||
|
// If the queue is already paused, it will return a non-nil error.
|
||||||
|
func (i *Inspector) PauseQueue(qname string) error {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return i.rdb.Pause(qname)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnpauseQueue resumes task processing on the specified queue.
|
||||||
|
// If the queue is not paused, it will return a non-nil error.
|
||||||
|
func (i *Inspector) UnpauseQueue(qname string) error {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return i.rdb.Unpause(qname)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Servers returns a list of running servers' information.
|
||||||
|
func (i *Inspector) Servers() ([]*ServerInfo, error) {
|
||||||
|
servers, err := i.rdb.ListServers()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
workers, err := i.rdb.ListWorkers()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
m := make(map[string]*ServerInfo) // ServerInfo keyed by serverID
|
||||||
|
for _, s := range servers {
|
||||||
|
m[s.ServerID] = &ServerInfo{
|
||||||
|
ID: s.ServerID,
|
||||||
|
Host: s.Host,
|
||||||
|
PID: s.PID,
|
||||||
|
Concurrency: s.Concurrency,
|
||||||
|
Queues: s.Queues,
|
||||||
|
StrictPriority: s.StrictPriority,
|
||||||
|
Started: s.Started,
|
||||||
|
Status: s.Status,
|
||||||
|
ActiveWorkers: make([]*WorkerInfo, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, w := range workers {
|
||||||
|
srvInfo, ok := m[w.ServerID]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
wrkInfo := &WorkerInfo{
|
||||||
|
Started: w.Started,
|
||||||
|
Deadline: w.Deadline,
|
||||||
|
Task: &ActiveTask{
|
||||||
|
Task: asynq.NewTask(w.Type, w.Payload),
|
||||||
|
ID: w.ID,
|
||||||
|
Queue: w.Queue,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
srvInfo.ActiveWorkers = append(srvInfo.ActiveWorkers, wrkInfo)
|
||||||
|
}
|
||||||
|
var out []*ServerInfo
|
||||||
|
for _, srvInfo := range m {
|
||||||
|
out = append(out, srvInfo)
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerInfo describes a running Server instance.
|
||||||
|
type ServerInfo struct {
|
||||||
|
// Unique Identifier for the server.
|
||||||
|
ID string
|
||||||
|
// Host machine on which the server is running.
|
||||||
|
Host string
|
||||||
|
// PID of the process in which the server is running.
|
||||||
|
PID int
|
||||||
|
|
||||||
|
// Server configuration details.
|
||||||
|
// See Config doc for field descriptions.
|
||||||
|
Concurrency int
|
||||||
|
Queues map[string]int
|
||||||
|
StrictPriority bool
|
||||||
|
|
||||||
|
// Time the server started.
|
||||||
|
Started time.Time
|
||||||
|
// Status indicates the status of the server.
|
||||||
|
// TODO: Update comment with more details.
|
||||||
|
Status string
|
||||||
|
// A List of active workers currently processing tasks.
|
||||||
|
ActiveWorkers []*WorkerInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// WorkerInfo describes a running worker processing a task.
|
||||||
|
type WorkerInfo struct {
|
||||||
|
// The task the worker is processing.
|
||||||
|
Task *ActiveTask
|
||||||
|
// Time the worker started processing the task.
|
||||||
|
Started time.Time
|
||||||
|
// Time the worker needs to finish processing the task by.
|
||||||
|
Deadline time.Time
|
||||||
|
}
|
||||||
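Putting Servers, ServerInfo, and WorkerInfo together, a monitoring loop might print what every worker is doing; sketch (inspector assumed):

    servers, err := inspector.Servers()
    if err != nil {
        log.Fatal(err)
    }
    for _, s := range servers {
        fmt.Printf("server %s on %s (pid %d), concurrency=%d, status=%s\n",
            s.ID, s.Host, s.PID, s.Concurrency, s.Status)
        for _, w := range s.ActiveWorkers {
            fmt.Printf("  task=%s queue=%s started=%s deadline=%s\n",
                w.Task.ID, w.Task.Queue, w.Started, w.Deadline)
        }
    }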
|
|
||||||
|
// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
|
||||||
|
func (i *Inspector) ClusterKeySlot(qname string) (int64, error) {
|
||||||
|
return i.rdb.ClusterKeySlot(qname)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterNode describes a node in redis cluster.
|
||||||
|
type ClusterNode struct {
|
||||||
|
// Node ID in the cluster.
|
||||||
|
ID string
|
||||||
|
|
||||||
|
// Address of the node.
|
||||||
|
Addr string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterNodes returns a list of nodes the given queue belongs to.
|
||||||
|
func (i *Inspector) ClusterNodes(qname string) ([]ClusterNode, error) {
|
||||||
|
nodes, err := i.rdb.ClusterNodes(qname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var res []ClusterNode
|
||||||
|
for _, node := range nodes {
|
||||||
|
res = append(res, ClusterNode{ID: node.ID, Addr: node.Addr})
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedulerEntry holds information about a periodic task registered with a scheduler.
|
||||||
|
type SchedulerEntry struct {
|
||||||
|
// Identifier of this entry.
|
||||||
|
ID string
|
||||||
|
|
||||||
|
// Spec describes the schedule of this entry.
|
||||||
|
Spec string
|
||||||
|
|
||||||
|
// Periodic Task registered for this entry.
|
||||||
|
Task *asynq.Task
|
||||||
|
|
||||||
|
// Opts is the options for the periodic task.
|
||||||
|
Opts []asynq.Option
|
||||||
|
|
||||||
|
// Next shows the next time the task will be enqueued.
|
||||||
|
Next time.Time
|
||||||
|
|
||||||
|
// Prev shows the last time the task was enqueued.
|
||||||
|
// Zero time if task was never enqueued.
|
||||||
|
Prev time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedulerEntries returns a list of all entries registered with
|
||||||
|
// currently running schedulers.
|
||||||
|
func (i *Inspector) SchedulerEntries() ([]*SchedulerEntry, error) {
|
||||||
|
var entries []*SchedulerEntry
|
||||||
|
res, err := i.rdb.ListSchedulerEntries()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, e := range res {
|
||||||
|
task := asynq.NewTask(e.Type, e.Payload)
|
||||||
|
var opts []asynq.Option
|
||||||
|
for _, s := range e.Opts {
|
||||||
|
if o, err := parseOption(s); err == nil {
|
||||||
|
// ignore bad data
|
||||||
|
opts = append(opts, o)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
entries = append(entries, &SchedulerEntry{
|
||||||
|
ID: e.ID,
|
||||||
|
Spec: e.Spec,
|
||||||
|
Task: task,
|
||||||
|
Opts: opts,
|
||||||
|
Next: e.Next,
|
||||||
|
Prev: e.Prev,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseOption interprets a string s as an Option and returns the Option if parsing is successful,
|
||||||
|
// otherwise returns non-nil error.
|
||||||
|
func parseOption(s string) (asynq.Option, error) {
|
||||||
|
fn, arg := parseOptionFunc(s), parseOptionArg(s)
|
||||||
|
switch fn {
|
||||||
|
case "Queue":
|
||||||
|
qname, err := strconv.Unquote(arg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return asynq.Queue(qname), nil
|
||||||
|
case "MaxRetry":
|
||||||
|
n, err := strconv.Atoi(arg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return asynq.MaxRetry(n), nil
|
||||||
|
case "Timeout":
|
||||||
|
d, err := time.ParseDuration(arg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return asynq.Timeout(d), nil
|
||||||
|
case "Deadline":
|
||||||
|
t, err := time.Parse(time.UnixDate, arg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return asynq.Deadline(t), nil
|
||||||
|
case "Unique":
|
||||||
|
d, err := time.ParseDuration(arg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return asynq.Unique(d), nil
|
||||||
|
case "ProcessAt":
|
||||||
|
t, err := time.Parse(time.UnixDate, arg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return asynq.ProcessAt(t), nil
|
||||||
|
case "ProcessIn":
|
||||||
|
d, err := time.ParseDuration(arg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return asynq.ProcessIn(d), nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("cannot parse option string %q", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseOptionFunc(s string) string {
|
||||||
|
i := strings.Index(s, "(")
|
||||||
|
return s[:i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseOptionArg(s string) string {
|
||||||
|
i := strings.Index(s, "(")
|
||||||
|
if i >= 0 {
|
||||||
|
j := strings.Index(s, ")")
|
||||||
|
if j > i {
|
||||||
|
return s[i+1 : j]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
|
||||||
|
type SchedulerEnqueueEvent struct {
|
||||||
|
// ID of the task that was enqueued.
|
||||||
|
TaskID string
|
||||||
|
|
||||||
|
// Time the task was enqueued.
|
||||||
|
EnqueuedAt time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListSchedulerEnqueueEvents retrieves a list of enqueue events from the specified scheduler entry.
|
||||||
|
//
|
||||||
|
// By default, it retrieves the first 30 tasks.
|
||||||
|
func (i *Inspector) ListSchedulerEnqueueEvents(entryID string, opts ...ListOption) ([]*SchedulerEnqueueEvent, error) {
|
||||||
|
opt := composeListOptions(opts...)
|
||||||
|
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||||
|
data, err := i.rdb.ListSchedulerEnqueueEvents(entryID, pgn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var events []*SchedulerEnqueueEvent
|
||||||
|
for _, e := range data {
|
||||||
|
events = append(events, &SchedulerEnqueueEvent{TaskID: e.TaskID, EnqueuedAt: e.EnqueuedAt})
|
||||||
|
}
|
||||||
|
return events, nil
|
||||||
|
}
|
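For orientation, here is a minimal sketch of how the two new Inspector methods above might be exercised. It assumes the inspeq package exposes a New constructor taking an asynq.RedisConnOpt (as its other methods do) and a Redis server at localhost:6379; it is an illustration, not part of this change.

package main

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/inspeq"
)

func main() {
	// Assumption: inspeq.New takes an asynq.RedisConnOpt, mirroring the rest of the package.
	i := inspeq.New(asynq.RedisClientOpt{Addr: "localhost:6379"})

	entries, err := i.SchedulerEntries()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Printf("entry=%s spec=%q next=%v prev=%v\n", e.ID, e.Spec, e.Next, e.Prev)

		// First page (30 by default) of enqueue events recorded for this entry.
		events, err := i.ListSchedulerEnqueueEvents(e.ID)
		if err != nil {
			log.Fatal(err)
		}
		for _, ev := range events {
			fmt.Printf("  task=%s enqueued_at=%v\n", ev.TaskID, ev.EnqueuedAt)
		}
	}
}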
inspeq/inspector_test.go (2712 lines, new file): diff suppressed because it is too large (Load Diff)
@@ -65,6 +65,24 @@ var SortWorkerInfoOpt = cmp.Transformer("SortWorkerInfo", func(in []*base.Worker
 	return out
 })
 
+// SortSchedulerEntryOpt is a cmp.Option to sort base.SchedulerEntry for comparing slice of entries.
+var SortSchedulerEntryOpt = cmp.Transformer("SortSchedulerEntry", func(in []*base.SchedulerEntry) []*base.SchedulerEntry {
+	out := append([]*base.SchedulerEntry(nil), in...) // Copy input to avoid mutating it
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].Spec < out[j].Spec
+	})
+	return out
+})
+
+// SortSchedulerEnqueueEventOpt is a cmp.Option to sort base.SchedulerEnqueueEvent for comparing slice of events.
+var SortSchedulerEnqueueEventOpt = cmp.Transformer("SortSchedulerEnqueueEvent", func(in []*base.SchedulerEnqueueEvent) []*base.SchedulerEnqueueEvent {
+	out := append([]*base.SchedulerEnqueueEvent(nil), in...)
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].EnqueuedAt.Unix() < out[j].EnqueuedAt.Unix()
+	})
+	return out
+})
+
 // SortStringSliceOpt is a cmp.Option to sort string slice.
 var SortStringSliceOpt = cmp.Transformer("SortStringSlice", func(in []string) []string {
 	out := append([]string(nil), in...)
@@ -202,11 +220,11 @@ func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qn
 	seedRedisZSet(tb, r, base.RetryKey(qname), entries)
 }
 
-// SeedDeadQueue initializes the dead queue with the given messages.
-func SeedDeadQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+// SeedArchivedQueue initializes the archived queue with the given messages.
+func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
 	tb.Helper()
 	r.SAdd(base.AllQueues, qname)
-	seedRedisZSet(tb, r, base.DeadKey(qname), entries)
+	seedRedisZSet(tb, r, base.ArchivedKey(qname), entries)
 }
 
 // SeedDeadlines initializes the deadlines set with the given entries.
@@ -246,10 +264,10 @@ func SeedAllRetryQueues(tb testing.TB, r redis.UniversalClient, retry map[string
 	}
 }
 
-// SeedAllDeadQueues initializes all of the specified dead queues with the given entries.
-func SeedAllDeadQueues(tb testing.TB, r redis.UniversalClient, dead map[string][]base.Z) {
-	for q, entries := range dead {
-		SeedDeadQueue(tb, r, entries, q)
+// SeedAllArchivedQueues initializes all of the specified archived queues with the given entries.
+func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[string][]base.Z) {
+	for q, entries := range archived {
+		SeedArchivedQueue(tb, r, entries, q)
 	}
 }
 
@@ -302,10 +320,10 @@ func GetRetryMessages(tb testing.TB, r redis.UniversalClient, qname string) []*b
 	return getZSetMessages(tb, r, base.RetryKey(qname))
 }
 
-// GetDeadMessages returns all dead messages in the given queue.
-func GetDeadMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+// GetArchivedMessages returns all archived messages in the given queue.
+func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
 	tb.Helper()
-	return getZSetMessages(tb, r, base.DeadKey(qname))
+	return getZSetMessages(tb, r, base.ArchivedKey(qname))
 }
 
 // GetScheduledEntries returns all scheduled messages and its score in the given queue.
@@ -320,10 +338,10 @@ func GetRetryEntries(tb testing.TB, r redis.UniversalClient, qname string) []bas
 	return getZSetEntries(tb, r, base.RetryKey(qname))
 }
 
-// GetDeadEntries returns all dead messages and its score in the given queue.
-func GetDeadEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+// GetArchivedEntries returns all archived messages and its score in the given queue.
+func GetArchivedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
 	tb.Helper()
-	return getZSetEntries(tb, r, base.DeadKey(qname))
+	return getZSetEntries(tb, r, base.ArchivedKey(qname))
 }
 
 // GetDeadlinesEntries returns all task messages and its score in the deadlines set for the given queue.
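A hedged sketch of how the new sort transformers are meant to be used with go-cmp in a test. The newTestRDB helper, the import alias h, and the entry values are illustrative assumptions, not part of this change.

package example_test

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
)

// Assumes newTestRDB returns an *rdb.RDB backed by a test Redis instance.
func TestListSchedulerEntriesSorted(t *testing.T) {
	r := newTestRDB(t)

	want := []*base.SchedulerEntry{
		{Spec: "@every 30s", Type: "email:send"},
		{Spec: "0 * * * *", Type: "report:generate"},
	}
	got, err := r.ListSchedulerEntries()
	if err != nil {
		t.Fatalf("ListSchedulerEntries() returned error: %v", err)
	}
	// Order of entries is not guaranteed, so compare with the sort transformer.
	if diff := cmp.Diff(want, got, h.SortSchedulerEntryOpt); diff != "" {
		t.Errorf("ListSchedulerEntries() mismatch (-want +got):\n%s", diff)
	}
}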
@@ -19,7 +19,7 @@ import (
 )
 
 // Version of asynq library and CLI.
-const Version = "0.12.0"
+const Version = "0.15.0"
 
 // DefaultQueueName is the queue name used if none are specified by user.
 const DefaultQueueName = "default"
@@ -31,10 +31,20 @@ var DefaultQueue = QueueKey(DefaultQueueName)
 const (
 	AllServers    = "asynq:servers"    // ZSET
 	AllWorkers    = "asynq:workers"    // ZSET
+	AllSchedulers = "asynq:schedulers" // ZSET
 	AllQueues     = "asynq:queues"     // SET
 	CancelChannel = "asynq:cancel"     // PubSub channel
 )
 
+// ValidateQueueName validates a given qname to be used as a queue name.
+// Returns nil if valid, otherwise returns non-nil error.
+func ValidateQueueName(qname string) error {
+	if len(qname) == 0 {
+		return fmt.Errorf("queue name must contain one or more characters")
+	}
+	return nil
+}
+
 // QueueKey returns a redis key for the given queue name.
 func QueueKey(qname string) string {
 	return fmt.Sprintf("asynq:{%s}", qname)
@@ -55,9 +65,9 @@ func RetryKey(qname string) string {
 	return fmt.Sprintf("asynq:{%s}:retry", qname)
 }
 
-// DeadKey returns a redis key for the dead tasks.
-func DeadKey(qname string) string {
-	return fmt.Sprintf("asynq:{%s}:dead", qname)
+// ArchivedKey returns a redis key for the archived tasks.
+func ArchivedKey(qname string) string {
+	return fmt.Sprintf("asynq:{%s}:archived", qname)
 }
 
 // DeadlinesKey returns a redis key for the deadlines.
@@ -81,13 +91,23 @@ func FailedKey(qname string, t time.Time) string {
 }
 
 // ServerInfoKey returns a redis key for process info.
-func ServerInfoKey(hostname string, pid int, sid string) string {
-	return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, sid)
+func ServerInfoKey(hostname string, pid int, serverID string) string {
+	return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, serverID)
 }
 
 // WorkersKey returns a redis key for the workers given hostname, pid, and server ID.
-func WorkersKey(hostname string, pid int, sid string) string {
-	return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, sid)
+func WorkersKey(hostname string, pid int, serverID string) string {
+	return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, serverID)
+}
+
+// SchedulerEntriesKey returns a redis key for the scheduler entries given scheduler ID.
+func SchedulerEntriesKey(schedulerID string) string {
+	return fmt.Sprintf("asynq:schedulers:{%s}", schedulerID)
+}
+
+// SchedulerHistoryKey returns a redis key for the scheduler's history for the given entry.
+func SchedulerHistoryKey(entryID string) string {
+	return fmt.Sprintf("asynq:scheduler_history:%s", entryID)
 }
 
 // UniqueKey returns a redis key with the given type, payload, and queue name.
@@ -145,7 +165,7 @@ type TaskMessage struct {
 
 	// Timeout specifies timeout in seconds.
 	// If task processing doesn't complete within the timeout, the task will be retried
-	// if retry count is remaining. Otherwise it will be moved to the dead queue.
+	// if retry count is remaining. Otherwise it will be moved to the archive.
 	//
 	// Use zero to indicate no timeout.
 	Timeout int64
@@ -153,7 +173,7 @@ type TaskMessage struct {
 	// Deadline specifies the deadline for the task in Unix time,
 	// the number of seconds elapsed since January 1, 1970 UTC.
 	// If task processing doesn't complete before the deadline, the task will be retried
-	// if retry count is remaining. Otherwise it will be moved to the dead queue.
+	// if retry count is remaining. Otherwise it will be moved to the archive.
 	//
 	// Use zero to indicate no deadline.
 	Deadline int64
@@ -208,10 +228,10 @@ const (
 	// StatusIdle indicates the server is in idle state.
 	StatusIdle ServerStatusValue = iota
 
-	// StatusRunning indicates the servier is up and processing tasks.
+	// StatusRunning indicates the server is up and active.
 	StatusRunning
 
-	// StatusQuiet indicates the server is up but not processing new tasks.
+	// StatusQuiet indicates the server is up but not active.
 	StatusQuiet
 
 	// StatusStopped indicates the server server has been stopped.
@@ -266,11 +286,47 @@ type ServerInfo struct {
 type WorkerInfo struct {
 	Host     string
 	PID      int
+	ServerID string
 	ID       string
 	Type     string
 	Queue    string
 	Payload  map[string]interface{}
 	Started  time.Time
+	Deadline time.Time
+}
+
+// SchedulerEntry holds information about a periodic task registered with a scheduler.
+type SchedulerEntry struct {
+	// Identifier of this entry.
+	ID string
+
+	// Spec describes the schedule of this entry.
+	Spec string
+
+	// Type is the task type of the periodic task.
+	Type string
+
+	// Payload is the payload of the periodic task.
+	Payload map[string]interface{}
+
+	// Opts is the options for the periodic task.
+	Opts []string
+
+	// Next shows the next time the task will be enqueued.
+	Next time.Time
+
+	// Prev shows the last time the task was enqueued.
+	// Zero time if task was never enqueued.
+	Prev time.Time
+}
+
+// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
+type SchedulerEnqueueEvent struct {
+	// ID of the task that was enqueued.
+	TaskID string
+
+	// Time the task was enqueued.
+	EnqueuedAt time.Time
 }
 
 // Cancelations is a collection that holds cancel functions for all active tasks.
@@ -323,7 +379,7 @@ type Broker interface {
 	Schedule(msg *TaskMessage, processAt time.Time) error
 	ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
 	Retry(msg *TaskMessage, processAt time.Time, errMsg string) error
-	Kill(msg *TaskMessage, errMsg string) error
+	Archive(msg *TaskMessage, errMsg string) error
 	CheckAndEnqueue(qnames ...string) error
 	ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
 	WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
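For reference, the key layout these helpers produce, written out as a small runnable sketch. The queue name and IDs are illustrative; the resulting strings follow directly from the fmt.Sprintf patterns above. Since base is an internal package, this compiles only inside the asynq module.

package main

import (
	"fmt"

	"github.com/hibiken/asynq/internal/base"
)

func main() {
	fmt.Println(base.QueueKey("default"))                   // asynq:{default}
	fmt.Println(base.RetryKey("default"))                   // asynq:{default}:retry
	fmt.Println(base.ArchivedKey("default"))                // asynq:{default}:archived
	fmt.Println(base.ServerInfoKey("host", 1234, "abc123")) // asynq:servers:{host:1234:abc123}
	fmt.Println(base.SchedulerEntriesKey("host:1234:xyz"))  // asynq:schedulers:{host:1234:xyz}
	fmt.Println(base.SchedulerHistoryKey("entry123"))       // asynq:scheduler_history:entry123
}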
@@ -100,19 +100,19 @@ func TestRetryKey(t *testing.T) {
 	}
 }
 
-func TestDeadKey(t *testing.T) {
+func TestArchivedKey(t *testing.T) {
 	tests := []struct {
 		qname string
 		want  string
 	}{
-		{"default", "asynq:{default}:dead"},
-		{"custom", "asynq:{custom}:dead"},
+		{"default", "asynq:{default}:archived"},
+		{"custom", "asynq:{custom}:archived"},
 	}
 
 	for _, tc := range tests {
-		got := DeadKey(tc.qname)
+		got := ArchivedKey(tc.qname)
 		if got != tc.want {
-			t.Errorf("DeadKey(%q) = %q, want %q", tc.qname, got, tc.want)
+			t.Errorf("ArchivedKey(%q) = %q, want %q", tc.qname, got, tc.want)
 		}
 	}
 }
@@ -212,6 +212,41 @@ func TestWorkersKey(t *testing.T) {
 	}
 }
 
+func TestSchedulerEntriesKey(t *testing.T) {
+	tests := []struct {
+		schedulerID string
+		want        string
+	}{
+		{"localhost:9876:scheduler123", "asynq:schedulers:{localhost:9876:scheduler123}"},
+		{"127.0.0.1:1234:scheduler987", "asynq:schedulers:{127.0.0.1:1234:scheduler987}"},
+	}
+
+	for _, tc := range tests {
+		got := SchedulerEntriesKey(tc.schedulerID)
+		if got != tc.want {
+			t.Errorf("SchedulerEntriesKey(%q) = %q, want %q", tc.schedulerID, got, tc.want)
+		}
+	}
+}
+
+func TestSchedulerHistoryKey(t *testing.T) {
+	tests := []struct {
+		entryID string
+		want    string
+	}{
+		{"entry876", "asynq:scheduler_history:entry876"},
+		{"entry345", "asynq:scheduler_history:entry345"},
+	}
+
+	for _, tc := range tests {
+		got := SchedulerHistoryKey(tc.entryID)
+		if got != tc.want {
+			t.Errorf("SchedulerHistoryKey(%q) = %q, want %q",
+				tc.entryID, got, tc.want)
+		}
+	}
+}
+
 func TestUniqueKey(t *testing.T) {
 	tests := []struct {
 		desc string
@@ -328,14 +363,14 @@ func TestStatusConcurrentAccess(t *testing.T) {
 	go func() {
 		defer wg.Done()
 		status.Get()
-		status.String()
+		_ = status.String()
 	}()
 
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
 		status.Set(StatusStopped)
-		status.String()
+		_ = status.String()
 	}()
 
 	wg.Wait()
@@ -25,6 +25,9 @@ func (r *RDB) AllQueues() ([]string, error) {
 type Stats struct {
 	// Name of the queue (e.g. "default", "critical").
 	Queue string
+	// MemoryUsage is the total number of bytes the queue and its tasks require
+	// to be stored in redis.
+	MemoryUsage int64
 	// Paused indicates whether the queue is paused.
 	// If true, tasks in the queue should not be processed.
 	Paused bool
@@ -35,7 +38,7 @@ type Stats struct {
 	Active    int
 	Scheduled int
 	Retry     int
-	Dead      int
+	Archived  int
 	// Total number of tasks processed during the current date.
 	// The number includes both succeeded and failed tasks.
 	Processed int
@@ -62,7 +65,7 @@ type DailyStats struct {
 // KEYS[2] -> asynq:<qname>:active
 // KEYS[3] -> asynq:<qname>:scheduled
 // KEYS[4] -> asynq:<qname>:retry
-// KEYS[5] -> asynq:<qname>:dead
+// KEYS[5] -> asynq:<qname>:archived
 // KEYS[6] -> asynq:<qname>:processed:<yyyy-mm-dd>
 // KEYS[7] -> asynq:<qname>:failed:<yyyy-mm-dd>
 // KEYS[8] -> asynq:<qname>:paused
@@ -111,7 +114,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
 		base.ActiveKey(qname),
 		base.ScheduledKey(qname),
 		base.RetryKey(qname),
-		base.DeadKey(qname),
+		base.ArchivedKey(qname),
 		base.ProcessedKey(qname, now),
 		base.FailedKey(qname, now),
 		base.PausedKey(qname),
@@ -144,8 +147,8 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
 		case base.RetryKey(qname):
 			stats.Retry = val
 			size += val
-		case base.DeadKey(qname):
-			stats.Dead = val
+		case base.ArchivedKey(qname):
+			stats.Archived = val
 			size += val
 		case base.ProcessedKey(qname, now):
 			stats.Processed = val
@@ -160,9 +163,30 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
 		}
 	}
 	stats.Size = size
+	memusg, err := r.memoryUsage(qname)
+	if err != nil {
+		return nil, err
+	}
+	stats.MemoryUsage = memusg
 	return stats, nil
 }
 
+func (r *RDB) memoryUsage(qname string) (int64, error) {
+	keys, err := r.client.Keys(fmt.Sprintf("asynq:{%s}*", qname)).Result()
+	if err != nil {
+		return 0, err
+	}
+	var usg int64
+	for _, k := range keys {
+		n, err := r.client.MemoryUsage(k).Result()
+		if err != nil {
+			return 0, err
+		}
+		usg += n
+	}
+	return usg, nil
+}
+
 var historicalStatsCmd = redis.NewScript(`
 local res = {}
 for _, key in ipairs(KEYS) do
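The memoryUsage helper above walks every key matching asynq:{<qname>}* with KEYS and sums MEMORY USAGE per key. Below is a sketch of the same aggregation done with SCAN instead, which avoids blocking Redis on a large keyspace. This is an assumption about an alternative approach, not part of this change; it uses the go-redis v7 API that this module depends on.

package main

import (
	"fmt"
	"log"

	"github.com/go-redis/redis/v7"
)

// queueMemoryUsage sums MEMORY USAGE over all keys belonging to the queue,
// iterating the keyspace incrementally with SCAN.
func queueMemoryUsage(c *redis.Client, qname string) (int64, error) {
	var (
		total  int64
		cursor uint64
	)
	pattern := fmt.Sprintf("asynq:{%s}*", qname)
	for {
		keys, next, err := c.Scan(cursor, pattern, 100).Result()
		if err != nil {
			return 0, err
		}
		for _, k := range keys {
			n, err := c.MemoryUsage(k).Result()
			if err != nil {
				return 0, err
			}
			total += n
		}
		if next == 0 {
			return total, nil
		}
		cursor = next
	}
}

func main() {
	c := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	n, err := queueMemoryUsage(c, "default")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("queue %q uses roughly %d bytes\n", "default", n)
}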
@@ -328,12 +352,12 @@ func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) {
 	return r.listZSetEntries(base.RetryKey(qname), pgn)
 }
 
-// ListDead returns all tasks from the given queue that have exhausted its retry limit.
-func (r *RDB) ListDead(qname string, pgn Pagination) ([]base.Z, error) {
+// ListArchived returns all tasks from the given queue that have exhausted its retry limit.
+func (r *RDB) ListArchived(qname string, pgn Pagination) ([]base.Z, error) {
 	if !r.client.SIsMember(base.AllQueues, qname).Val() {
 		return nil, fmt.Errorf("queue %q does not exist", qname)
 	}
-	return r.listZSetEntries(base.DeadKey(qname), pgn)
+	return r.listZSetEntries(base.ArchivedKey(qname), pgn)
 }
 
 // listZSetEntries returns a list of message and score pairs in Redis sorted-set
@@ -353,16 +377,16 @@ func (r *RDB) listZSetEntries(key string, pgn Pagination) ([]base.Z, error) {
 		if err != nil {
 			continue // bad data, ignore and continue
 		}
-		res = append(res, base.Z{msg, int64(z.Score)})
+		res = append(res, base.Z{Message: msg, Score: int64(z.Score)})
 	}
 	return res, nil
 }
 
-// RunDeadTask finds a dead task that matches the given id and score from
+// RunArchivedTask finds an archived task that matches the given id and score from
 // the given queue and enqueues it for processing.
 // If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
-func (r *RDB) RunDeadTask(qname string, id uuid.UUID, score int64) error {
-	n, err := r.removeAndRun(base.DeadKey(qname), base.QueueKey(qname), id.String(), float64(score))
+func (r *RDB) RunArchivedTask(qname string, id uuid.UUID, score int64) error {
+	n, err := r.removeAndRun(base.ArchivedKey(qname), base.QueueKey(qname), id.String(), float64(score))
 	if err != nil {
 		return err
 	}
@@ -412,10 +436,10 @@ func (r *RDB) RunAllRetryTasks(qname string) (int64, error) {
 	return r.removeAndRunAll(base.RetryKey(qname), base.QueueKey(qname))
 }
 
-// RunAllDeadTasks enqueues all tasks from dead queue
+// RunAllArchivedTasks enqueues all archived tasks from the given queue
 // and returns the number of tasks enqueued.
-func (r *RDB) RunAllDeadTasks(qname string) (int64, error) {
-	return r.removeAndRunAll(base.DeadKey(qname), base.QueueKey(qname))
+func (r *RDB) RunAllArchivedTasks(qname string) (int64, error) {
+	return r.removeAndRunAll(base.ArchivedKey(qname), base.QueueKey(qname))
 }
 
 var removeAndRunCmd = redis.NewScript(`
@@ -462,10 +486,10 @@ func (r *RDB) removeAndRunAll(zset, qkey string) (int64, error) {
 	return n, nil
 }
 
-// KillRetryTask finds a retry task that matches the given id and score from the given queue
-// and kills it. If a task that maches the id and score does not exist, it returns ErrTaskNotFound.
-func (r *RDB) KillRetryTask(qname string, id uuid.UUID, score int64) error {
-	n, err := r.removeAndKill(base.RetryKey(qname), base.DeadKey(qname), id.String(), float64(score))
+// ArchiveRetryTask finds a retry task that matches the given id and score from the given queue
+// and archives it. If a task that maches the id and score does not exist, it returns ErrTaskNotFound.
+func (r *RDB) ArchiveRetryTask(qname string, id uuid.UUID, score int64) error {
+	n, err := r.removeAndArchive(base.RetryKey(qname), base.ArchivedKey(qname), id.String(), float64(score))
 	if err != nil {
 		return err
 	}
@@ -475,10 +499,10 @@ func (r *RDB) KillRetryTask(qname string, id uuid.UUID, score int64) error {
 	return nil
 }
 
-// KillScheduledTask finds a scheduled task that matches the given id and score from the given queue
-// and kills it. If a task that maches the id and score does not exist, it returns ErrTaskNotFound.
-func (r *RDB) KillScheduledTask(qname string, id uuid.UUID, score int64) error {
-	n, err := r.removeAndKill(base.ScheduledKey(qname), base.DeadKey(qname), id.String(), float64(score))
+// ArchiveScheduledTask finds a scheduled task that matches the given id and score from the given queue
+// and archives it. If a task that maches the id and score does not exist, it returns ErrTaskNotFound.
+func (r *RDB) ArchiveScheduledTask(qname string, id uuid.UUID, score int64) error {
+	n, err := r.removeAndArchive(base.ScheduledKey(qname), base.ArchivedKey(qname), id.String(), float64(score))
 	if err != nil {
 		return err
 	}
@@ -488,26 +512,119 @@ func (r *RDB) KillScheduledTask(qname string, id uuid.UUID, score int64) error {
 	return nil
 }
 
-// KillAllRetryTasks kills all retry tasks from the given queue and
-// returns the number of tasks that were moved.
-func (r *RDB) KillAllRetryTasks(qname string) (int64, error) {
-	return r.removeAndKillAll(base.RetryKey(qname), base.DeadKey(qname))
+// KEYS[1] -> asynq:{<qname>}
+// KEYS[2] -> asynq:{<qname>}:archived
+// ARGV[1] -> task message to archive
+// ARGV[2] -> current timestamp
+// ARGV[3] -> cutoff timestamp (e.g., 90 days ago)
+// ARGV[4] -> max number of tasks in archive (e.g., 100)
+var archivePendingCmd = redis.NewScript(`
+local x = redis.call("LREM", KEYS[1], 1, ARGV[1])
+if x == 0 then
+	return 0
+end
+redis.call("ZADD", KEYS[2], ARGV[2], ARGV[1])
+redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[3])
+redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[4])
+return 1
+`)
+
+func (r *RDB) archivePending(qname, msg string) (int64, error) {
+	keys := []string{base.QueueKey(qname), base.ArchivedKey(qname)}
+	now := time.Now()
+	limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
+	args := []interface{}{msg, now.Unix(), limit, maxArchiveSize}
+	res, err := archivePendingCmd.Run(r.client, keys, args...).Result()
+	if err != nil {
+		return 0, err
+	}
+	n, ok := res.(int64)
+	if !ok {
+		return 0, fmt.Errorf("could not cast %v to int64", res)
+	}
+	return n, nil
 }
 
-// KillAllScheduledTasks kills all scheduled tasks from the given queue and
+// ArchivePendingTask finds a pending task that matches the given id from the given queue
+// and archives it. If a task that maches the id does not exist, it returns ErrTaskNotFound.
+func (r *RDB) ArchivePendingTask(qname string, id uuid.UUID) error {
+	qkey := base.QueueKey(qname)
+	data, err := r.client.LRange(qkey, 0, -1).Result()
+	if err != nil {
+		return err
+	}
+	for _, s := range data {
+		msg, err := base.DecodeMessage(s)
+		if err != nil {
+			return err
+		}
+		if msg.ID == id {
+			n, err := r.archivePending(qname, s)
+			if err != nil {
+				return err
+			}
+			if n == 0 {
+				return ErrTaskNotFound
+			}
+			return nil
+		}
+	}
+	return ErrTaskNotFound
+}
+
+// ArchiveAllRetryTasks archives all retry tasks from the given queue and
 // returns the number of tasks that were moved.
-func (r *RDB) KillAllScheduledTasks(qname string) (int64, error) {
-	return r.removeAndKillAll(base.ScheduledKey(qname), base.DeadKey(qname))
+func (r *RDB) ArchiveAllRetryTasks(qname string) (int64, error) {
+	return r.removeAndArchiveAll(base.RetryKey(qname), base.ArchivedKey(qname))
+}
+
+// ArchiveAllScheduledTasks archives all scheduled tasks from the given queue and
+// returns the number of tasks that were moved.
+func (r *RDB) ArchiveAllScheduledTasks(qname string) (int64, error) {
+	return r.removeAndArchiveAll(base.ScheduledKey(qname), base.ArchivedKey(qname))
+}
+
+// KEYS[1] -> asynq:{<qname>}
+// KEYS[2] -> asynq:{<qname>}:archived
+// ARGV[1] -> current timestamp
+// ARGV[2] -> cutoff timestamp (e.g., 90 days ago)
+// ARGV[3] -> max number of tasks in archive (e.g., 100)
+var archiveAllPendingCmd = redis.NewScript(`
+local msgs = redis.call("LRANGE", KEYS[1], 0, -1)
+for _, msg in ipairs(msgs) do
+	redis.call("ZADD", KEYS[2], ARGV[1], msg)
+	redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[2])
+	redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[3])
+end
+redis.call("DEL", KEYS[1])
+return table.getn(msgs)`)
+
+// ArchiveAllPendingTasks archives all pending tasks from the given queue and
+// returns the number of tasks that were moved.
+func (r *RDB) ArchiveAllPendingTasks(qname string) (int64, error) {
+	keys := []string{base.QueueKey(qname), base.ArchivedKey(qname)}
+	now := time.Now()
+	limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
+	args := []interface{}{now.Unix(), limit, maxArchiveSize}
+	res, err := archiveAllPendingCmd.Run(r.client, keys, args...).Result()
+	if err != nil {
+		return 0, err
+	}
+	n, ok := res.(int64)
+	if !ok {
+		return 0, fmt.Errorf("could not cast %v to int64", res)
+	}
+	return n, nil
 }
 
 // KEYS[1] -> ZSET to move task from (e.g., retry queue)
-// KEYS[2] -> asynq:{<qname>}:dead
-// ARGV[1] -> score of the task to kill
-// ARGV[2] -> id of the task to kill
+// KEYS[2] -> asynq:{<qname>}:archived
+// ARGV[1] -> score of the task to archive
+// ARGV[2] -> id of the task to archive
 // ARGV[3] -> current timestamp
 // ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
-// ARGV[5] -> max number of tasks in dead queue (e.g., 100)
-var removeAndKillCmd = redis.NewScript(`
+// ARGV[5] -> max number of tasks in archived state (e.g., 100)
+var removeAndArchiveCmd = redis.NewScript(`
 local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], ARGV[1], ARGV[1])
 for _, msg in ipairs(msgs) do
 	local decoded = cjson.decode(msg)
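A short sketch of driving the new pending-task archiving path end to end. Because internal/rdb is an internal package this only compiles inside the asynq module, and the task ID handling is illustrative.

package main

import (
	"log"

	"github.com/go-redis/redis/v7"
	"github.com/google/uuid"
	"github.com/hibiken/asynq/internal/rdb"
)

func main() {
	r := rdb.NewRDB(redis.NewClient(&redis.Options{Addr: "localhost:6379"}))

	// Normally the ID comes from listing pending tasks; a random one here
	// simply demonstrates the ErrTaskNotFound path.
	id := uuid.New()

	switch err := r.ArchivePendingTask("default", id); err {
	case nil:
		log.Println("pending task moved to the archive")
	case rdb.ErrTaskNotFound:
		log.Println("no pending task with that id in queue \"default\"")
	default:
		log.Fatal(err)
	}
}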
@@ -521,12 +638,12 @@ for _, msg in ipairs(msgs) do
 end
 return 0`)
 
-func (r *RDB) removeAndKill(src, dst, id string, score float64) (int64, error) {
+func (r *RDB) removeAndArchive(src, dst, id string, score float64) (int64, error) {
 	now := time.Now()
-	limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
-	res, err := removeAndKillCmd.Run(r.client,
+	limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
+	res, err := removeAndArchiveCmd.Run(r.client,
 		[]string{src, dst},
-		score, id, now.Unix(), limit, maxDeadTasks).Result()
+		score, id, now.Unix(), limit, maxArchiveSize).Result()
 	if err != nil {
 		return 0, err
 	}
@@ -538,11 +655,11 @@ func (r *RDB) removeAndKill(src, dst, id string, score float64) (int64, error) {
 }
 
 // KEYS[1] -> ZSET to move task from (e.g., retry queue)
-// KEYS[2] -> asynq:{<qname>}:dead
+// KEYS[2] -> asynq:{<qname>}:archived
 // ARGV[1] -> current timestamp
 // ARGV[2] -> cutoff timestamp (e.g., 90 days ago)
-// ARGV[3] -> max number of tasks in dead queue (e.g., 100)
-var removeAndKillAllCmd = redis.NewScript(`
+// ARGV[3] -> max number of tasks in archive (e.g., 100)
+var removeAndArchiveAllCmd = redis.NewScript(`
 local msgs = redis.call("ZRANGE", KEYS[1], 0, -1)
 for _, msg in ipairs(msgs) do
 	redis.call("ZADD", KEYS[2], ARGV[1], msg)
@@ -552,11 +669,11 @@ for _, msg in ipairs(msgs) do
 end
 return table.getn(msgs)`)
 
-func (r *RDB) removeAndKillAll(src, dst string) (int64, error) {
+func (r *RDB) removeAndArchiveAll(src, dst string) (int64, error) {
 	now := time.Now()
-	limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
-	res, err := removeAndKillAllCmd.Run(r.client, []string{src, dst},
-		now.Unix(), limit, maxDeadTasks).Result()
+	limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
+	res, err := removeAndArchiveAllCmd.Run(r.client, []string{src, dst},
+		now.Unix(), limit, maxArchiveSize).Result()
 	if err != nil {
 		return 0, err
 	}
@@ -567,10 +684,10 @@ func (r *RDB) removeAndKillAll(src, dst string) (int64, error) {
 	return n, nil
 }
 
-// DeleteDeadTask deletes a dead task that matches the given id and score from the given queue.
+// DeleteArchivedTask deletes an archived task that matches the given id and score from the given queue.
 // If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
-func (r *RDB) DeleteDeadTask(qname string, id uuid.UUID, score int64) error {
-	return r.deleteTask(base.DeadKey(qname), id.String(), float64(score))
+func (r *RDB) DeleteArchivedTask(qname string, id uuid.UUID, score int64) error {
+	return r.deleteTask(base.ArchivedKey(qname), id.String(), float64(score))
 }
 
 // DeleteRetryTask deletes a retry task that matches the given id and score from the given queue.
@@ -585,6 +702,33 @@ func (r *RDB) DeleteScheduledTask(qname string, id uuid.UUID, score int64) error
 	return r.deleteTask(base.ScheduledKey(qname), id.String(), float64(score))
 }
 
+// DeletePendingTask deletes a pending tasks that matches the given id from the given queue.
+// If a task that matches the id does not exist, it returns ErrTaskNotFound.
+func (r *RDB) DeletePendingTask(qname string, id uuid.UUID) error {
+	qkey := base.QueueKey(qname)
+	data, err := r.client.LRange(qkey, 0, -1).Result()
+	if err != nil {
+		return err
+	}
+	for _, s := range data {
+		msg, err := base.DecodeMessage(s)
+		if err != nil {
+			return err
+		}
+		if msg.ID == id {
+			n, err := r.client.LRem(qkey, 1, s).Result()
+			if err != nil {
+				return err
+			}
+			if n == 0 {
+				return ErrTaskNotFound
+			}
+			return nil
+		}
+	}
+	return ErrTaskNotFound
+}
+
 var deleteTaskCmd = redis.NewScript(`
 local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], ARGV[1], ARGV[1])
 for _, msg in ipairs(msgs) do
@@ -617,10 +761,10 @@ local n = redis.call("ZCARD", KEYS[1])
 redis.call("DEL", KEYS[1])
 return n`)
 
-// DeleteAllDeadTasks deletes all dead tasks from the given queue
+// DeleteAllArchivedTasks deletes all archived tasks from the given queue
 // and returns the number of tasks deleted.
-func (r *RDB) DeleteAllDeadTasks(qname string) (int64, error) {
-	return r.deleteAll(base.DeadKey(qname))
+func (r *RDB) DeleteAllArchivedTasks(qname string) (int64, error) {
+	return r.deleteAll(base.ArchivedKey(qname))
}
 
 // DeleteAllRetryTasks deletes all retry tasks from the given queue
@@ -647,6 +791,26 @@ func (r *RDB) deleteAll(key string) (int64, error) {
 	return n, nil
 }
 
+// KEYS[1] -> asynq:{<qname>}
+var deleteAllPendingCmd = redis.NewScript(`
+local n = redis.call("LLEN", KEYS[1])
+redis.call("DEL", KEYS[1])
+return n`)
+
+// DeleteAllPendingTasks deletes all pending tasks from the given queue
+// and returns the number of tasks deleted.
+func (r *RDB) DeleteAllPendingTasks(qname string) (int64, error) {
+	res, err := deleteAllPendingCmd.Run(r.client, []string{base.QueueKey(qname)}).Result()
+	if err != nil {
+		return 0, err
+	}
+	n, ok := res.(int64)
+	if !ok {
+		return 0, fmt.Errorf("could not cast %v to int64", res)
+	}
+	return n, nil
+}
+
 // ErrQueueNotFound indicates specified queue does not exist.
 type ErrQueueNotFound struct {
 	qname string
@@ -670,7 +834,7 @@ func (e *ErrQueueNotEmpty) Error() string {
 // KEYS[2] -> asynq:{<qname>}:active
 // KEYS[3] -> asynq:{<qname>}:scheduled
 // KEYS[4] -> asynq:{<qname>}:retry
-// KEYS[5] -> asynq:{<qname>}:dead
+// KEYS[5] -> asynq:{<qname>}:archived
 // KEYS[6] -> asynq:{<qname>}:deadlines
 var removeQueueForceCmd = redis.NewScript(`
 local active = redis.call("LLEN", KEYS[2])
@@ -690,15 +854,15 @@ return redis.status_reply("OK")`)
 // KEYS[2] -> asynq:{<qname>}:active
 // KEYS[3] -> asynq:{<qname>}:scheduled
 // KEYS[4] -> asynq:{<qname>}:retry
-// KEYS[5] -> asynq:{<qname>}:dead
+// KEYS[5] -> asynq:{<qname>}:archived
 // KEYS[6] -> asynq:{<qname>}:deadlines
 var removeQueueCmd = redis.NewScript(`
 local pending = redis.call("LLEN", KEYS[1])
 local active = redis.call("LLEN", KEYS[2])
 local scheduled = redis.call("SCARD", KEYS[3])
 local retry = redis.call("SCARD", KEYS[4])
-local dead = redis.call("SCARD", KEYS[5])
-local total = pending + active + scheduled + retry + dead
+local archived = redis.call("SCARD", KEYS[5])
+local total = pending + active + scheduled + retry + archived
 if total > 0 then
 	return redis.error_reply("QUEUE NOT EMPTY")
 end
@@ -735,17 +899,15 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
 		base.ActiveKey(qname),
 		base.ScheduledKey(qname),
 		base.RetryKey(qname),
-		base.DeadKey(qname),
+		base.ArchivedKey(qname),
 		base.DeadlinesKey(qname),
 	}
 	if err := script.Run(r.client, keys).Err(); err != nil {
 		if err.Error() == "QUEUE NOT EMPTY" {
 			return &ErrQueueNotEmpty{qname}
-		} else {
-			return err
 		}
+		return err
 	}
-
 	return r.client.SRem(base.AllQueues, qname).Err()
 }
 
@@ -758,7 +920,7 @@ return keys`)
 
 // ListServers returns the list of server info.
 func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
-	now := time.Now().UTC()
+	now := time.Now()
 	res, err := listServerKeysCmd.Run(r.client, []string{base.AllServers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err
@@ -791,7 +953,7 @@ return keys`)
 
 // ListWorkers returns the list of worker stats.
 func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
-	now := time.Now().UTC()
+	now := time.Now()
 	res, err := listWorkerKeysCmd.Run(r.client, []string{base.AllWorkers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err
@@ -812,12 +974,68 @@ func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
 			continue // skip bad data
 		}
 		workers = append(workers, &w)
-
 	}
 	return workers, nil
 }
 
+// Note: Script also removes stale keys.
+var listSchedulerKeysCmd = redis.NewScript(`
+local now = tonumber(ARGV[1])
+local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
+redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
+return keys`)
+
+// ListSchedulerEntries returns the list of scheduler entries.
+func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
+	now := time.Now()
+	res, err := listSchedulerKeysCmd.Run(r.client, []string{base.AllSchedulers}, now.Unix()).Result()
+	if err != nil {
+		return nil, err
+	}
+	keys, err := cast.ToStringSliceE(res)
+	if err != nil {
+		return nil, err
+	}
+	var entries []*base.SchedulerEntry
+	for _, key := range keys {
+		data, err := r.client.LRange(key, 0, -1).Result()
+		if err != nil {
+			continue // skip bad data
+		}
+		for _, s := range data {
+			var e base.SchedulerEntry
+			if err := json.Unmarshal([]byte(s), &e); err != nil {
+				continue // skip bad data
+			}
+			entries = append(entries, &e)
+		}
+	}
+	return entries, nil
+}
+
+// ListSchedulerEnqueueEvents returns the list of scheduler enqueue events.
+func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*base.SchedulerEnqueueEvent, error) {
+	key := base.SchedulerHistoryKey(entryID)
+	zs, err := r.client.ZRevRangeWithScores(key, pgn.start(), pgn.stop()).Result()
+	if err != nil {
+		return nil, err
+	}
+	var events []*base.SchedulerEnqueueEvent
+	for _, z := range zs {
+		data, err := cast.ToStringE(z.Member)
+		if err != nil {
+			return nil, err
+		}
+		var e base.SchedulerEnqueueEvent
+		if err := json.Unmarshal([]byte(data), &e); err != nil {
+			return nil, err
+		}
+		events = append(events, &e)
+	}
+	return events, nil
+}
+
 // Pause pauses processing of tasks from the given queue.
 func (r *RDB) Pause(qname string) error {
 	key := base.PausedKey(qname)

(File diff suppressed because it is too large; Load Diff)
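To page through the recorded history at the RDB level, something like the following fragment could be used. It is a sketch: r is assumed to be an *rdb.RDB and entryID a known scheduler entry ID, and the Pagination fields match how the Inspector wrapper earlier in this diff constructs them.

	// Fetch the second page of 10 enqueue events for a given entry.
	events, err := r.ListSchedulerEnqueueEvents(entryID, rdb.Pagination{Size: 10, Page: 1})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range events {
		fmt.Printf("task=%s enqueued_at=%v\n", e.TaskID, e.EnqueuedAt)
	}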
@@ -381,22 +381,22 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) e
 }
 
 const (
-	maxDeadTasks         = 10000
-	deadExpirationInDays = 90
+	maxArchiveSize           = 10000 // maximum number of tasks in archive
+	archivedExpirationInDays = 90    // number of days before an archived task gets deleted permanently
 )
 
 // KEYS[1] -> asynq:{<qname>}:active
 // KEYS[2] -> asynq:{<qname>}:deadlines
-// KEYS[3] -> asynq:{<qname>}:dead
+// KEYS[3] -> asynq:{<qname>}:archived
 // KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
 // KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
-// ARGV[1] -> base.TaskMessage value to remove from base.ActiveQueue queue
-// ARGV[2] -> base.TaskMessage value to add to Dead queue
+// ARGV[1] -> base.TaskMessage value to remove
+// ARGV[2] -> base.TaskMessage value to add
 // ARGV[3] -> died_at UNIX timestamp
 // ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
-// ARGV[5] -> max number of tasks in dead queue (e.g., 100)
+// ARGV[5] -> max number of tasks in archive (e.g., 100)
 // ARGV[6] -> stats expiration timestamp
-var killCmd = redis.NewScript(`
+var archiveCmd = redis.NewScript(`
 if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
 	return redis.error_reply("NOT FOUND")
 end
@@ -416,10 +416,9 @@ if tonumber(m) == 1 then
 end
 return redis.status_reply("OK")`)
 
-// Kill sends the task to "dead" queue from active queue, assigning
-// the error message to the task.
-// It also trims the set by timestamp and set size.
-func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
+// Archive sends the given task to archive, attaching the error message to the task.
+// It also trims the archive by timestamp and set size.
+func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
 	msgToRemove, err := base.EncodeMessage(msg)
 	if err != nil {
 		return err
@@ -431,13 +430,13 @@ func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
 		return err
 	}
 	now := time.Now()
-	limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
+	limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
 	processedKey := base.ProcessedKey(msg.Queue, now)
 	failedKey := base.FailedKey(msg.Queue, now)
 	expireAt := now.Add(statsTTL)
-	return killCmd.Run(r.client,
-		[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.DeadKey(msg.Queue), processedKey, failedKey},
-		msgToRemove, msgToAdd, now.Unix(), limit, maxDeadTasks, expireAt.Unix()).Err()
+	return archiveCmd.Run(r.client,
+		[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.ArchivedKey(msg.Queue), processedKey, failedKey},
+		msgToRemove, msgToAdd, now.Unix(), limit, maxArchiveSize, expireAt.Unix()).Err()
 }
 
 // CheckAndEnqueue checks for scheduled/retry tasks for the given queues
@@ -575,6 +574,45 @@ func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
 	return clearServerStateCmd.Run(r.client, []string{skey, wkey}).Err()
 }
 
+// KEYS[1] -> asynq:schedulers:{<schedulerID>}
+// ARGV[1] -> TTL in seconds
+// ARGV[2:] -> schedler entries
+var writeSchedulerEntriesCmd = redis.NewScript(`
+redis.call("DEL", KEYS[1])
+for i = 2, #ARGV do
+	redis.call("LPUSH", KEYS[1], ARGV[i])
+end
+redis.call("EXPIRE", KEYS[1], ARGV[1])
+return redis.status_reply("OK")`)
+
+// WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl.
+func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
+	args := []interface{}{ttl.Seconds()}
+	for _, e := range entries {
+		bytes, err := json.Marshal(e)
+		if err != nil {
+			continue // skip bad data
+		}
+		args = append(args, bytes)
+	}
+	exp := time.Now().Add(ttl).UTC()
+	key := base.SchedulerEntriesKey(schedulerID)
+	err := r.client.ZAdd(base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
+	if err != nil {
+		return err
+	}
+	return writeSchedulerEntriesCmd.Run(r.client, []string{key}, args...).Err()
+}
+
+// ClearSchedulerEntries deletes scheduler entries data from redis.
+func (r *RDB) ClearSchedulerEntries(scheduelrID string) error {
+	key := base.SchedulerEntriesKey(scheduelrID)
+	if err := r.client.ZRem(base.AllSchedulers, key).Err(); err != nil {
+		return err
+	}
+	return r.client.Del(key).Err()
+}
+
 // CancelationPubSub returns a pubsub for cancelation messages.
 func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
 	pubsub := r.client.Subscribe(base.CancelChannel)
@@ -590,3 +628,32 @@ func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
 func (r *RDB) PublishCancelation(id string) error {
 	return r.client.Publish(base.CancelChannel, id).Err()
 }
+
+// KEYS[1] -> asynq:scheduler_history:<entryID>
+// ARGV[1] -> enqueued_at timestamp
+// ARGV[2] -> serialized SchedulerEnqueueEvent data
+// ARGV[3] -> max number of events to be persisted
+var recordSchedulerEnqueueEventCmd = redis.NewScript(`
+redis.call("ZREMRANGEBYRANK", KEYS[1], 0, -ARGV[3])
+redis.call("ZADD", KEYS[1], ARGV[1], ARGV[2])
+return redis.status_reply("OK")`)
+
+// Maximum number of enqueue events to store per entry.
+const maxEvents = 1000
+
+// RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
+func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
+	key := base.SchedulerHistoryKey(entryID)
+	data, err := json.Marshal(event)
+	if err != nil {
+		return err
+	}
+	return recordSchedulerEnqueueEventCmd.Run(
+		r.client, []string{key}, event.EnqueuedAt.Unix(), data, maxEvents).Err()
+}
+
+// ClearSchedulerHistory deletes the enqueue event history for the given scheduler entry.
+func (r *RDB) ClearSchedulerHistory(entryID string) error {
+	key := base.SchedulerHistoryKey(entryID)
+	return r.client.Del(key).Err()
+}
||||||
|
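Editor's note: a rough sketch of how these helpers are intended to be driven. This is not part of the change itself; the real wiring (a five-second heartbeat that rewrites the entries and an enqueue job that records events) is added to scheduler.go later in this diff. Function and variable names below are illustrative, and the internal rdb/base packages can only be imported from within the asynq module.

package asynq // sketch only, not part of this change

import (
	"log"
	"time"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

// runEntryHeartbeat refreshes the entry snapshot on a fixed interval so the
// data written by WriteSchedulerEntries expires soon after a scheduler dies.
func runEntryHeartbeat(r *rdb.RDB, schedulerID string, entries []*base.SchedulerEntry, done <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			// Drop the snapshot on shutdown so stale entries are not left behind.
			if err := r.ClearSchedulerEntries(schedulerID); err != nil {
				log.Printf("could not clear scheduler entries: %v", err)
			}
			return
		case <-ticker.C:
			// TTL matches the tick, so entries disappear unless rewritten.
			if err := r.WriteSchedulerEntries(schedulerID, entries, 5*time.Second); err != nil {
				log.Printf("could not write scheduler entries: %v", err)
			}
		}
	}
}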
@@ -1008,7 +1008,7 @@ func TestRetry(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKill(t *testing.T) {
|
func TestArchive(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
@@ -1058,11 +1058,11 @@ func TestKill(t *testing.T) {
|
|||||||
tests := []struct {
|
tests := []struct {
|
||||||
inProgress map[string][]*base.TaskMessage
|
inProgress map[string][]*base.TaskMessage
|
||||||
deadlines map[string][]base.Z
|
deadlines map[string][]base.Z
|
||||||
dead map[string][]base.Z
|
archived map[string][]base.Z
|
||||||
target *base.TaskMessage // task to kill
|
target *base.TaskMessage // task to archive
|
||||||
wantActive map[string][]*base.TaskMessage
|
wantActive map[string][]*base.TaskMessage
|
||||||
wantDeadlines map[string][]base.Z
|
wantDeadlines map[string][]base.Z
|
||||||
wantDead map[string][]base.Z
|
wantArchived map[string][]base.Z
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
inProgress: map[string][]*base.TaskMessage{
|
inProgress: map[string][]*base.TaskMessage{
|
||||||
@@ -1074,7 +1074,7 @@ func TestKill(t *testing.T) {
|
|||||||
{Message: t2, Score: t2Deadline},
|
{Message: t2, Score: t2Deadline},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
dead: map[string][]base.Z{
|
archived: map[string][]base.Z{
|
||||||
"default": {
|
"default": {
|
||||||
{Message: t3, Score: now.Add(-time.Hour).Unix()},
|
{Message: t3, Score: now.Add(-time.Hour).Unix()},
|
||||||
},
|
},
|
||||||
@@ -1086,7 +1086,7 @@ func TestKill(t *testing.T) {
|
|||||||
wantDeadlines: map[string][]base.Z{
|
wantDeadlines: map[string][]base.Z{
|
||||||
"default": {{Message: t2, Score: t2Deadline}},
|
"default": {{Message: t2, Score: t2Deadline}},
|
||||||
},
|
},
|
||||||
wantDead: map[string][]base.Z{
|
wantArchived: map[string][]base.Z{
|
||||||
"default": {
|
"default": {
|
||||||
{Message: h.TaskMessageWithError(*t1, errMsg), Score: now.Unix()},
|
{Message: h.TaskMessageWithError(*t1, errMsg), Score: now.Unix()},
|
||||||
{Message: t3, Score: now.Add(-time.Hour).Unix()},
|
{Message: t3, Score: now.Add(-time.Hour).Unix()},
|
||||||
@@ -1104,7 +1104,7 @@ func TestKill(t *testing.T) {
|
|||||||
{Message: t3, Score: t3Deadline},
|
{Message: t3, Score: t3Deadline},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
dead: map[string][]base.Z{
|
archived: map[string][]base.Z{
|
||||||
"default": {},
|
"default": {},
|
||||||
},
|
},
|
||||||
target: t1,
|
target: t1,
|
||||||
@@ -1117,7 +1117,7 @@ func TestKill(t *testing.T) {
|
|||||||
{Message: t3, Score: t3Deadline},
|
{Message: t3, Score: t3Deadline},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
wantDead: map[string][]base.Z{
|
wantArchived: map[string][]base.Z{
|
||||||
"default": {
|
"default": {
|
||||||
{Message: h.TaskMessageWithError(*t1, errMsg), Score: now.Unix()},
|
{Message: h.TaskMessageWithError(*t1, errMsg), Score: now.Unix()},
|
||||||
},
|
},
|
||||||
@@ -1136,7 +1136,7 @@ func TestKill(t *testing.T) {
|
|||||||
{Message: t4, Score: t4Deadline},
|
{Message: t4, Score: t4Deadline},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
dead: map[string][]base.Z{
|
archived: map[string][]base.Z{
|
||||||
"default": {},
|
"default": {},
|
||||||
"custom": {},
|
"custom": {},
|
||||||
},
|
},
|
||||||
@@ -1149,7 +1149,7 @@ func TestKill(t *testing.T) {
|
|||||||
"default": {{Message: t1, Score: t1Deadline}},
|
"default": {{Message: t1, Score: t1Deadline}},
|
||||||
"custom": {},
|
"custom": {},
|
||||||
},
|
},
|
||||||
wantDead: map[string][]base.Z{
|
wantArchived: map[string][]base.Z{
|
||||||
"default": {},
|
"default": {},
|
||||||
"custom": {
|
"custom": {
|
||||||
{Message: h.TaskMessageWithError(*t4, errMsg), Score: now.Unix()},
|
{Message: h.TaskMessageWithError(*t4, errMsg), Score: now.Unix()},
|
||||||
@@ -1162,11 +1162,11 @@ func TestKill(t *testing.T) {
|
|||||||
h.FlushDB(t, r.client) // clean up db before each test case
|
h.FlushDB(t, r.client) // clean up db before each test case
|
||||||
h.SeedAllActiveQueues(t, r.client, tc.inProgress)
|
h.SeedAllActiveQueues(t, r.client, tc.inProgress)
|
||||||
h.SeedAllDeadlines(t, r.client, tc.deadlines)
|
h.SeedAllDeadlines(t, r.client, tc.deadlines)
|
||||||
h.SeedAllDeadQueues(t, r.client, tc.dead)
|
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||||
|
|
||||||
err := r.Kill(tc.target, errMsg)
|
err := r.Archive(tc.target, errMsg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("(*RDB).Kill(%v, %v) = %v, want nil", tc.target, errMsg, err)
|
t.Errorf("(*RDB).Archive(%v, %v) = %v, want nil", tc.target, errMsg, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1179,13 +1179,13 @@ func TestKill(t *testing.T) {
|
|||||||
for queue, want := range tc.wantDeadlines {
|
for queue, want := range tc.wantDeadlines {
|
||||||
gotDeadlines := h.GetDeadlinesEntries(t, r.client, queue)
|
gotDeadlines := h.GetDeadlinesEntries(t, r.client, queue)
|
||||||
if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
|
if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
|
||||||
t.Errorf("mismatch found in %q after calling (*RDB).Kill: (-want, +got):\n%s", base.DeadlinesKey(queue), diff)
|
t.Errorf("mismatch found in %q after calling (*RDB).Archive: (-want, +got):\n%s", base.DeadlinesKey(queue), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for queue, want := range tc.wantDead {
|
for queue, want := range tc.wantArchived {
|
||||||
gotDead := h.GetDeadEntries(t, r.client, queue)
|
gotArchived := h.GetArchivedEntries(t, r.client, queue)
|
||||||
if diff := cmp.Diff(want, gotDead, h.SortZSetEntryOpt, zScoreCmpOpt); diff != "" {
|
if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt, zScoreCmpOpt); diff != "" {
|
||||||
t.Errorf("mismatch found in %q after calling (*RDB).Kill: (-want, +got):\n%s", base.DeadKey(queue), diff)
|
t.Errorf("mismatch found in %q after calling (*RDB).Archive: (-want, +got):\n%s", base.ArchivedKey(queue), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -117,13 +117,13 @@ func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg s
 	return tb.real.Retry(msg, processAt, errMsg)
 }
 
-func (tb *TestBroker) Kill(msg *base.TaskMessage, errMsg string) error {
+func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Kill(msg, errMsg)
+	return tb.real.Archive(msg, errMsg)
 }
 
 func (tb *TestBroker) CheckAndEnqueue(qnames ...string) error {
payload.go (10 lines changed)

@@ -44,6 +44,16 @@ func toInt(v interface{}) (int, error) {
 	}
 }
 
+// String returns a string representation of payload data.
+func (p Payload) String() string {
+	return fmt.Sprint(p.data)
+}
+
+// MarshalJSON returns the JSON encoding of payload data.
+func (p Payload) MarshalJSON() ([]byte, error) {
+	return json.Marshal(p.data)
+}
+
 // GetString returns a string value if a string type is associated with
 // the key, otherwise reports an error.
 func (p Payload) GetString(key string) (string, error) {
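Editor's note: the two new methods exist purely to make payloads easier to log and inspect. A small usage sketch (the task type and payload key are made up; the outputs in the comments are what fmt and encoding/json produce for this data):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/hibiken/asynq"
)

func main() {
	t := asynq.NewTask("email:welcome", map[string]interface{}{"user_id": 42})

	// String gives a quick human-readable dump of the payload data.
	fmt.Println(t.Payload.String()) // map[user_id:42]

	// MarshalJSON lets the payload be embedded in structured logs.
	b, err := json.Marshal(t.Payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"user_id":42}
}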
|
@@ -6,6 +6,7 @@ package asynq
 
 import (
 	"encoding/json"
+	"fmt"
 	"testing"
 	"time"
 
@@ -645,3 +646,30 @@ func TestPayloadHas(t *testing.T) {
 		t.Errorf("Payload.Has(%q) = true, want false", "name")
 	}
 }
+
+func TestPayloadDebuggingStrings(t *testing.T) {
+	data := map[string]interface{}{
+		"foo": 123,
+		"bar": "hello",
+		"baz": false,
+	}
+	payload := Payload{data: data}
+
+	if payload.String() != fmt.Sprint(data) {
+		t.Errorf("Payload.String() = %q, want %q",
+			payload.String(), fmt.Sprint(data))
+	}
+
+	got, err := payload.MarshalJSON()
+	if err != nil {
+		t.Fatal(err)
+	}
+	want, err := json.Marshal(data)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := cmp.Diff(got, want); diff != "" {
+		t.Errorf("Payload.MarshalJSON() = %s, want %s; (-want,+got)\n%s",
+			got, want, diff)
+	}
+}
|
processor.go (54 lines changed)

@@ -6,9 +6,13 @@ package asynq
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"math/rand"
+	"runtime"
+	"runtime/debug"
 	"sort"
+	"strings"
 	"sync"
 	"time"
 
||||||
@@ -29,7 +33,7 @@ type processor struct {
 	// orderedQueues is set only in strict-priority mode.
 	orderedQueues []string
 
-	retryDelayFunc retryDelayFunc
+	retryDelayFunc RetryDelayFunc
 
 	errHandler ErrorHandler
 
@@ -59,16 +63,14 @@ type processor struct {
 	// cancelations is a set of cancel functions for all active tasks.
 	cancelations *base.Cancelations
 
-	starting chan<- *base.TaskMessage
+	starting chan<- *workerInfo
 	finished chan<- *base.TaskMessage
 }
 
-type retryDelayFunc func(n int, err error, task *Task) time.Duration
-
 type processorParams struct {
 	logger         *log.Logger
 	broker         base.Broker
-	retryDelayFunc retryDelayFunc
+	retryDelayFunc RetryDelayFunc
 	syncCh         chan<- *syncRequest
 	cancelations   *base.Cancelations
 	concurrency    int
|
||||||
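Editor's note: with the internal retryDelayFunc type replaced by an exported RetryDelayFunc, applications can plug a custom backoff into the server configuration. A hedged sketch, assuming the exported type keeps the signature of the removed internal type (func(n int, err error, task *Task) time.Duration) and that Config exposes a RetryDelayFunc field; the rate-limit error and delays are illustrative:

package tasks

import (
	"errors"
	"time"

	"github.com/hibiken/asynq"
)

var errRateLimited = errors.New("rate limited")

// backoff is a sketch of a custom RetryDelayFunc: rate-limit errors wait a
// full minute, everything else backs off linearly with the retry count.
func backoff(n int, err error, t *asynq.Task) time.Duration {
	if errors.Is(err, errRateLimited) {
		return time.Minute
	}
	return time.Duration(n) * 5 * time.Second
}

func newServer(opt asynq.RedisClientOpt) *asynq.Server {
	return asynq.NewServer(opt, asynq.Config{
		Concurrency:    10,
		RetryDelayFunc: backoff,
	})
}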
@@ -76,7 +78,7 @@ type processorParams struct {
|
|||||||
strictPriority bool
|
strictPriority bool
|
||||||
errHandler ErrorHandler
|
errHandler ErrorHandler
|
||||||
shutdownTimeout time.Duration
|
shutdownTimeout time.Duration
|
||||||
starting chan<- *base.TaskMessage
|
starting chan<- *workerInfo
|
||||||
finished chan<- *base.TaskMessage
|
finished chan<- *base.TaskMessage
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -102,6 +104,7 @@ func newProcessor(params processorParams) *processor {
|
|||||||
abort: make(chan struct{}),
|
abort: make(chan struct{}),
|
||||||
errHandler: params.errHandler,
|
errHandler: params.errHandler,
|
||||||
handler: HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
|
handler: HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
|
||||||
|
shutdownTimeout: params.shutdownTimeout,
|
||||||
starting: params.starting,
|
starting: params.starting,
|
||||||
finished: params.finished,
|
finished: params.finished,
|
||||||
}
|
}
|
||||||
@@ -177,7 +180,7 @@ func (p *processor) exec() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p.starting <- msg
|
p.starting <- &workerInfo{msg, time.Now(), deadline}
|
||||||
go func() {
|
go func() {
|
||||||
defer func() {
|
defer func() {
|
||||||
p.finished <- msg
|
p.finished <- msg
|
||||||
@@ -202,7 +205,7 @@ func (p *processor) exec() {
|
|||||||
|
|
||||||
resCh := make(chan error, 1)
|
resCh := make(chan error, 1)
|
||||||
go func() {
|
go func() {
|
||||||
resCh <- perform(ctx, NewTask(msg.Type, msg.Payload), p.handler)
|
resCh <- p.perform(ctx, NewTask(msg.Type, msg.Payload))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
@@ -218,7 +221,7 @@ func (p *processor) exec() {
|
|||||||
// Note: One of three things should happen.
|
// Note: One of three things should happen.
|
||||||
// 1) Done -> Removes the message from Active
|
// 1) Done -> Removes the message from Active
|
||||||
// 2) Retry -> Removes the message from Active & Adds the message to Retry
|
// 2) Retry -> Removes the message from Active & Adds the message to Retry
|
||||||
// 3) Kill -> Removes the message from Active & Adds the message to Dead
|
// 3) Archive -> Removes the message from Active & Adds the message to archive
|
||||||
if resErr != nil {
|
if resErr != nil {
|
||||||
p.retryOrKill(ctx, msg, resErr)
|
p.retryOrKill(ctx, msg, resErr)
|
||||||
return
|
return
|
||||||
@@ -257,13 +260,17 @@ func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SkipRetry is used as a return value from Handler.ProcessTask to indicate that
|
||||||
|
// the task should not be retried and should be archived instead.
|
||||||
|
var SkipRetry = errors.New("skip retry for the task")
|
||||||
|
|
||||||
func (p *processor) retryOrKill(ctx context.Context, msg *base.TaskMessage, err error) {
|
func (p *processor) retryOrKill(ctx context.Context, msg *base.TaskMessage, err error) {
|
||||||
if p.errHandler != nil {
|
if p.errHandler != nil {
|
||||||
p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
|
p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
|
||||||
}
|
}
|
||||||
if msg.Retried >= msg.Retry {
|
if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) {
|
||||||
p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
|
p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
|
||||||
p.kill(ctx, msg, err)
|
p.archive(ctx, msg, err)
|
||||||
} else {
|
} else {
|
||||||
p.retry(ctx, msg, err)
|
p.retry(ctx, msg, err)
|
||||||
}
|
}
|
||||||
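Editor's note: because the check uses errors.Is, SkipRetry works both returned directly and wrapped with %w, which is exactly what the new test cases further down exercise. A sketch of a handler opting out of retries for input that can never succeed (task payload key and the resize helper are illustrative):

package tasks

import (
	"context"
	"fmt"

	"github.com/hibiken/asynq"
)

func resize(ctx context.Context, src string) error { return nil } // stub for the sketch

// handleImageResize archives permanently-broken input instead of retrying it.
func handleImageResize(ctx context.Context, t *asynq.Task) error {
	src, err := t.Payload.GetString("src_url")
	if err != nil {
		// A malformed payload will never succeed on retry; wrap SkipRetry so the
		// processor archives the task immediately.
		return fmt.Errorf("invalid payload: %v: %w", err, asynq.SkipRetry)
	}
	return resize(ctx, src) // transient failures here still go through normal retry
}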
@@ -290,10 +297,10 @@ func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *processor) kill(ctx context.Context, msg *base.TaskMessage, e error) {
|
func (p *processor) archive(ctx context.Context, msg *base.TaskMessage, e error) {
|
||||||
err := p.broker.Kill(msg, e.Error())
|
err := p.broker.Archive(msg, e.Error())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.DeadKey(msg.Queue))
|
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue))
|
||||||
deadline, ok := ctx.Deadline()
|
deadline, ok := ctx.Deadline()
|
||||||
if !ok {
|
if !ok {
|
||||||
panic("asynq: internal error: missing deadline in context")
|
panic("asynq: internal error: missing deadline in context")
|
||||||
@@ -301,7 +308,7 @@ func (p *processor) kill(ctx context.Context, msg *base.TaskMessage, e error) {
|
|||||||
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
||||||
p.syncRequestCh <- &syncRequest{
|
p.syncRequestCh <- &syncRequest{
|
||||||
fn: func() error {
|
fn: func() error {
|
||||||
return p.broker.Kill(msg, e.Error())
|
return p.broker.Archive(msg, e.Error())
|
||||||
},
|
},
|
||||||
errMsg: errMsg,
|
errMsg: errMsg,
|
||||||
deadline: deadline,
|
deadline: deadline,
|
||||||
@@ -339,13 +346,26 @@ func (p *processor) queues() []string {
|
|||||||
// perform calls the handler with the given task.
|
// perform calls the handler with the given task.
|
||||||
// If the call returns without panic, it simply returns the value,
|
// If the call returns without panic, it simply returns the value,
|
||||||
// otherwise, it recovers from panic and returns an error.
|
// otherwise, it recovers from panic and returns an error.
|
||||||
func perform(ctx context.Context, task *Task, h Handler) (err error) {
|
func (p *processor) perform(ctx context.Context, task *Task) (err error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if x := recover(); x != nil {
|
if x := recover(); x != nil {
|
||||||
|
p.logger.Errorf("recovering from panic. See the stack trace below for details:\n%s", string(debug.Stack()))
|
||||||
|
_, file, line, ok := runtime.Caller(1) // skip the first frame (panic itself)
|
||||||
|
if ok && strings.Contains(file, "runtime/") {
|
||||||
|
// The panic came from the runtime, most likely due to incorrect
|
||||||
|
// map/slice usage. The parent frame should have the real trigger.
|
||||||
|
_, file, line, ok = runtime.Caller(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Include the file and line number info in the error, if runtime.Caller returned ok.
|
||||||
|
if ok {
|
||||||
|
err = fmt.Errorf("panic [%s:%d]: %v", file, line, x)
|
||||||
|
} else {
|
||||||
err = fmt.Errorf("panic: %v", x)
|
err = fmt.Errorf("panic: %v", x)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
return h.ProcessTask(ctx, task)
|
return p.handler.ProcessTask(ctx, task)
|
||||||
}
|
}
|
||||||
|
|
||||||
// uniq dedupes elements and returns a slice of unique names of length l.
|
// uniq dedupes elements and returns a slice of unique names of length l.
|
||||||
|
@@ -20,7 +20,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// fakeHeartbeater receives from starting and finished channels and do nothing.
|
// fakeHeartbeater receives from starting and finished channels and do nothing.
|
||||||
func fakeHeartbeater(starting, finished <-chan *base.TaskMessage, done <-chan struct{}) {
|
func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-starting:
|
case <-starting:
|
||||||
@@ -86,7 +86,7 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
|
|||||||
processed = append(processed, task)
|
processed = append(processed, task)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
starting := make(chan *base.TaskMessage)
|
starting := make(chan *workerInfo)
|
||||||
finished := make(chan *base.TaskMessage)
|
finished := make(chan *base.TaskMessage)
|
||||||
syncCh := make(chan *syncRequest)
|
syncCh := make(chan *syncRequest)
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
@@ -96,7 +96,7 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
|
|||||||
p := newProcessor(processorParams{
|
p := newProcessor(processorParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: rdbClient,
|
broker: rdbClient,
|
||||||
retryDelayFunc: defaultDelayFunc,
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
syncCh: syncCh,
|
syncCh: syncCh,
|
||||||
cancelations: base.NewCancelations(),
|
cancelations: base.NewCancelations(),
|
||||||
concurrency: 10,
|
concurrency: 10,
|
||||||
@@ -177,7 +177,7 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
|
|||||||
processed = append(processed, task)
|
processed = append(processed, task)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
starting := make(chan *base.TaskMessage)
|
starting := make(chan *workerInfo)
|
||||||
finished := make(chan *base.TaskMessage)
|
finished := make(chan *base.TaskMessage)
|
||||||
syncCh := make(chan *syncRequest)
|
syncCh := make(chan *syncRequest)
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
@@ -187,7 +187,7 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
|
|||||||
p := newProcessor(processorParams{
|
p := newProcessor(processorParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: rdbClient,
|
broker: rdbClient,
|
||||||
retryDelayFunc: defaultDelayFunc,
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
syncCh: syncCh,
|
syncCh: syncCh,
|
||||||
cancelations: base.NewCancelations(),
|
cancelations: base.NewCancelations(),
|
||||||
concurrency: 10,
|
concurrency: 10,
|
||||||
@@ -258,7 +258,7 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
|
|||||||
processed = append(processed, task)
|
processed = append(processed, task)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
starting := make(chan *base.TaskMessage)
|
starting := make(chan *workerInfo)
|
||||||
finished := make(chan *base.TaskMessage)
|
finished := make(chan *base.TaskMessage)
|
||||||
syncCh := make(chan *syncRequest)
|
syncCh := make(chan *syncRequest)
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
@@ -268,7 +268,7 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
|
|||||||
p := newProcessor(processorParams{
|
p := newProcessor(processorParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: rdbClient,
|
broker: rdbClient,
|
||||||
retryDelayFunc: defaultDelayFunc,
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
syncCh: syncCh,
|
syncCh: syncCh,
|
||||||
cancelations: base.NewCancelations(),
|
cancelations: base.NewCancelations(),
|
||||||
concurrency: 10,
|
concurrency: 10,
|
||||||
@@ -307,19 +307,22 @@ func TestProcessorRetry(t *testing.T) {
|
|||||||
m4 := h.NewTaskMessage("sync", nil)
|
m4 := h.NewTaskMessage("sync", nil)
|
||||||
|
|
||||||
errMsg := "something went wrong"
|
errMsg := "something went wrong"
|
||||||
|
wrappedSkipRetry := fmt.Errorf("%s:%w", errMsg, SkipRetry)
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
|
desc string // test description
|
||||||
pending []*base.TaskMessage // initial default queue state
|
pending []*base.TaskMessage // initial default queue state
|
||||||
incoming []*base.TaskMessage // tasks to be enqueued during run
|
incoming []*base.TaskMessage // tasks to be enqueued during run
|
||||||
delay time.Duration // retry delay duration
|
delay time.Duration // retry delay duration
|
||||||
handler Handler // task handler
|
handler Handler // task handler
|
||||||
wait time.Duration // wait duration between starting and stopping processor for this test case
|
wait time.Duration // wait duration between starting and stopping processor for this test case
|
||||||
wantRetry []base.Z // tasks in retry queue at the end
|
wantRetry []base.Z // tasks in retry queue at the end
|
||||||
wantDead []*base.TaskMessage // tasks in dead queue at the end
|
wantArchived []*base.TaskMessage // tasks in archived queue at the end
|
||||||
wantErrCount int // number of times error handler should be called
|
wantErrCount int // number of times error handler should be called
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
|
desc: "Should automatically retry errored tasks",
|
||||||
pending: []*base.TaskMessage{m1, m2},
|
pending: []*base.TaskMessage{m1, m2},
|
||||||
incoming: []*base.TaskMessage{m3, m4},
|
incoming: []*base.TaskMessage{m3, m4},
|
||||||
delay: time.Minute,
|
delay: time.Minute,
|
||||||
@@ -332,9 +335,41 @@ func TestProcessorRetry(t *testing.T) {
|
|||||||
{Message: h.TaskMessageAfterRetry(*m3, errMsg), Score: now.Add(time.Minute).Unix()},
|
{Message: h.TaskMessageAfterRetry(*m3, errMsg), Score: now.Add(time.Minute).Unix()},
|
||||||
{Message: h.TaskMessageAfterRetry(*m4, errMsg), Score: now.Add(time.Minute).Unix()},
|
{Message: h.TaskMessageAfterRetry(*m4, errMsg), Score: now.Add(time.Minute).Unix()},
|
||||||
},
|
},
|
||||||
wantDead: []*base.TaskMessage{h.TaskMessageWithError(*m1, errMsg)},
|
wantArchived: []*base.TaskMessage{h.TaskMessageWithError(*m1, errMsg)},
|
||||||
wantErrCount: 4,
|
wantErrCount: 4,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
desc: "Should skip retry errored tasks",
|
||||||
|
pending: []*base.TaskMessage{m1, m2},
|
||||||
|
incoming: []*base.TaskMessage{},
|
||||||
|
delay: time.Minute,
|
||||||
|
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
|
||||||
|
return SkipRetry // return SkipRetry without wrapping
|
||||||
|
}),
|
||||||
|
wait: 2 * time.Second,
|
||||||
|
wantRetry: []base.Z{},
|
||||||
|
wantArchived: []*base.TaskMessage{
|
||||||
|
h.TaskMessageWithError(*m1, SkipRetry.Error()),
|
||||||
|
h.TaskMessageWithError(*m2, SkipRetry.Error()),
|
||||||
|
},
|
||||||
|
wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "Should skip retry errored tasks (with error wrapping)",
|
||||||
|
pending: []*base.TaskMessage{m1, m2},
|
||||||
|
incoming: []*base.TaskMessage{},
|
||||||
|
delay: time.Minute,
|
||||||
|
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
|
||||||
|
return wrappedSkipRetry
|
||||||
|
}),
|
||||||
|
wait: 2 * time.Second,
|
||||||
|
wantRetry: []base.Z{},
|
||||||
|
wantArchived: []*base.TaskMessage{
|
||||||
|
h.TaskMessageWithError(*m1, wrappedSkipRetry.Error()),
|
||||||
|
h.TaskMessageWithError(*m2, wrappedSkipRetry.Error()),
|
||||||
|
},
|
||||||
|
wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
@@ -354,7 +389,7 @@ func TestProcessorRetry(t *testing.T) {
|
|||||||
defer mu.Unlock()
|
defer mu.Unlock()
|
||||||
n++
|
n++
|
||||||
}
|
}
|
||||||
starting := make(chan *base.TaskMessage)
|
starting := make(chan *workerInfo)
|
||||||
finished := make(chan *base.TaskMessage)
|
finished := make(chan *base.TaskMessage)
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
defer func() { close(done) }()
|
defer func() { close(done) }()
|
||||||
@@ -389,16 +424,16 @@ func TestProcessorRetry(t *testing.T) {
|
|||||||
cmpOpt := h.EquateInt64Approx(1) // allow up to a second difference in zset score
|
cmpOpt := h.EquateInt64Approx(1) // allow up to a second difference in zset score
|
||||||
gotRetry := h.GetRetryEntries(t, r, base.DefaultQueueName)
|
gotRetry := h.GetRetryEntries(t, r, base.DefaultQueueName)
|
||||||
if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
|
if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
|
||||||
t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.RetryKey(base.DefaultQueueName), diff)
|
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.RetryKey(base.DefaultQueueName), diff)
|
||||||
}
|
}
|
||||||
|
|
||||||
gotDead := h.GetDeadMessages(t, r, base.DefaultQueueName)
|
gotDead := h.GetArchivedMessages(t, r, base.DefaultQueueName)
|
||||||
if diff := cmp.Diff(tc.wantDead, gotDead, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(tc.wantArchived, gotDead, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.DeadKey(base.DefaultQueueName), diff)
|
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff)
|
||||||
}
|
}
|
||||||
|
|
||||||
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||||
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
|
t.Errorf("%s: %q has %d tasks, want 0", tc.desc, base.ActiveKey(base.DefaultQueueName), l)
|
||||||
}
|
}
|
||||||
|
|
||||||
if n != tc.wantErrCount {
|
if n != tc.wantErrCount {
|
||||||
@@ -435,7 +470,7 @@ func TestProcessorQueues(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
starting := make(chan *base.TaskMessage)
|
starting := make(chan *workerInfo)
|
||||||
finished := make(chan *base.TaskMessage)
|
finished := make(chan *base.TaskMessage)
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
defer func() { close(done) }()
|
defer func() { close(done) }()
|
||||||
@@ -443,7 +478,7 @@ func TestProcessorQueues(t *testing.T) {
|
|||||||
p := newProcessor(processorParams{
|
p := newProcessor(processorParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: nil,
|
broker: nil,
|
||||||
retryDelayFunc: defaultDelayFunc,
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
syncCh: nil,
|
syncCh: nil,
|
||||||
cancelations: base.NewCancelations(),
|
cancelations: base.NewCancelations(),
|
||||||
concurrency: 10,
|
concurrency: 10,
|
||||||
@@ -524,7 +559,7 @@ func TestProcessorWithStrictPriority(t *testing.T) {
|
|||||||
"critical": 3,
|
"critical": 3,
|
||||||
"low": 1,
|
"low": 1,
|
||||||
}
|
}
|
||||||
starting := make(chan *base.TaskMessage)
|
starting := make(chan *workerInfo)
|
||||||
finished := make(chan *base.TaskMessage)
|
finished := make(chan *base.TaskMessage)
|
||||||
syncCh := make(chan *syncRequest)
|
syncCh := make(chan *syncRequest)
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
@@ -534,7 +569,7 @@ func TestProcessorWithStrictPriority(t *testing.T) {
|
|||||||
p := newProcessor(processorParams{
|
p := newProcessor(processorParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: rdbClient,
|
broker: rdbClient,
|
||||||
retryDelayFunc: defaultDelayFunc,
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
syncCh: syncCh,
|
syncCh: syncCh,
|
||||||
cancelations: base.NewCancelations(),
|
cancelations: base.NewCancelations(),
|
||||||
concurrency: 1, // Set concurrency to 1 to make sure tasks are processed one at a time.
|
concurrency: 1, // Set concurrency to 1 to make sure tasks are processed one at a time.
|
||||||
@@ -564,7 +599,7 @@ func TestProcessorWithStrictPriority(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPerform(t *testing.T) {
|
func TestProcessorPerform(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
desc string
|
desc string
|
||||||
handler HandlerFunc
|
handler HandlerFunc
|
||||||
@@ -596,9 +631,16 @@ func TestPerform(t *testing.T) {
|
|||||||
wantErr: true,
|
wantErr: true,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
// Note: We don't need to fully initialize the processor since we are only testing
|
||||||
|
// perform method.
|
||||||
|
p := newProcessor(processorParams{
|
||||||
|
logger: testLogger,
|
||||||
|
queues: defaultQueueConfig,
|
||||||
|
})
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
got := perform(context.Background(), tc.task, tc.handler)
|
p.handler = tc.handler
|
||||||
|
got := p.perform(context.Background(), tc.task)
|
||||||
if !tc.wantErr && got != nil {
|
if !tc.wantErr && got != nil {
|
||||||
t.Errorf("%s: perform() = %v, want nil", tc.desc, got)
|
t.Errorf("%s: perform() = %v, want nil", tc.desc, got)
|
||||||
continue
|
continue
|
||||||
|
recoverer.go (12 lines changed)

@@ -16,7 +16,7 @@ import (
 type recoverer struct {
 	logger *log.Logger
 	broker base.Broker
-	retryDelayFunc retryDelayFunc
+	retryDelayFunc RetryDelayFunc
 
 	// channel to communicate back to the long running "recoverer" goroutine.
 	done chan struct{}
@@ -33,7 +33,7 @@ type recovererParams struct {
 	broker         base.Broker
 	queues         []string
 	interval       time.Duration
-	retryDelayFunc retryDelayFunc
+	retryDelayFunc RetryDelayFunc
 }
 
 func newRecoverer(params recovererParams) *recoverer {
@@ -75,7 +75,7 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
 	const errMsg = "deadline exceeded" // TODO: better error message
 	for _, msg := range msgs {
 		if msg.Retried >= msg.Retry {
-			r.kill(msg, errMsg)
+			r.archive(msg, errMsg)
 		} else {
 			r.retry(msg, errMsg)
 		}
@@ -94,8 +94,8 @@ func (r *recoverer) retry(msg *base.TaskMessage, errMsg string) {
 	}
 }
 
-func (r *recoverer) kill(msg *base.TaskMessage, errMsg string) {
-	if err := r.broker.Kill(msg, errMsg); err != nil {
-		r.logger.Warnf("recoverer: could not move task to dead queue: %v", err)
+func (r *recoverer) archive(msg *base.TaskMessage, errMsg string) {
+	if err := r.broker.Archive(msg, errMsg); err != nil {
+		r.logger.Warnf("recoverer: could not move task to archive: %v", err)
 	}
 }
|
||||||
|
@@ -37,11 +37,11 @@ func TestRecoverer(t *testing.T) {
|
|||||||
inProgress map[string][]*base.TaskMessage
|
inProgress map[string][]*base.TaskMessage
|
||||||
deadlines map[string][]base.Z
|
deadlines map[string][]base.Z
|
||||||
retry map[string][]base.Z
|
retry map[string][]base.Z
|
||||||
dead map[string][]base.Z
|
archived map[string][]base.Z
|
||||||
wantActive map[string][]*base.TaskMessage
|
wantActive map[string][]*base.TaskMessage
|
||||||
wantDeadlines map[string][]base.Z
|
wantDeadlines map[string][]base.Z
|
||||||
wantRetry map[string][]*base.TaskMessage
|
wantRetry map[string][]*base.TaskMessage
|
||||||
wantDead map[string][]*base.TaskMessage
|
wantArchived map[string][]*base.TaskMessage
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
desc: "with one active task",
|
desc: "with one active task",
|
||||||
@@ -54,7 +54,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
retry: map[string][]base.Z{
|
retry: map[string][]base.Z{
|
||||||
"default": {},
|
"default": {},
|
||||||
},
|
},
|
||||||
dead: map[string][]base.Z{
|
archived: map[string][]base.Z{
|
||||||
"default": {},
|
"default": {},
|
||||||
},
|
},
|
||||||
wantActive: map[string][]*base.TaskMessage{
|
wantActive: map[string][]*base.TaskMessage{
|
||||||
@@ -66,7 +66,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
wantRetry: map[string][]*base.TaskMessage{
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")},
|
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")},
|
||||||
},
|
},
|
||||||
wantDead: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {},
|
"default": {},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -84,7 +84,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
dead: map[string][]base.Z{
|
archived: map[string][]base.Z{
|
||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
@@ -100,7 +100,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
wantDead: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {h.TaskMessageWithError(*t4, "deadline exceeded")},
|
"default": {h.TaskMessageWithError(*t4, "deadline exceeded")},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
@@ -124,7 +124,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
dead: map[string][]base.Z{
|
archived: map[string][]base.Z{
|
||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
@@ -140,7 +140,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")},
|
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
wantDead: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
@@ -164,7 +164,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
"default": {},
|
"default": {},
|
||||||
"cricial": {},
|
"cricial": {},
|
||||||
},
|
},
|
||||||
dead: map[string][]base.Z{
|
archived: map[string][]base.Z{
|
||||||
"default": {},
|
"default": {},
|
||||||
"cricial": {},
|
"cricial": {},
|
||||||
},
|
},
|
||||||
@@ -179,7 +179,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")},
|
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")},
|
||||||
"critical": {h.TaskMessageAfterRetry(*t3, "deadline exceeded")},
|
"critical": {h.TaskMessageAfterRetry(*t3, "deadline exceeded")},
|
||||||
},
|
},
|
||||||
wantDead: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
@@ -198,7 +198,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
dead: map[string][]base.Z{
|
archived: map[string][]base.Z{
|
||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
@@ -214,7 +214,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
wantDead: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {},
|
"default": {},
|
||||||
"critical": {},
|
"critical": {},
|
||||||
},
|
},
|
||||||
@@ -226,7 +226,7 @@ func TestRecoverer(t *testing.T) {
|
|||||||
h.SeedAllActiveQueues(t, r, tc.inProgress)
|
h.SeedAllActiveQueues(t, r, tc.inProgress)
|
||||||
h.SeedAllDeadlines(t, r, tc.deadlines)
|
h.SeedAllDeadlines(t, r, tc.deadlines)
|
||||||
h.SeedAllRetryQueues(t, r, tc.retry)
|
h.SeedAllRetryQueues(t, r, tc.retry)
|
||||||
h.SeedAllDeadQueues(t, r, tc.dead)
|
h.SeedAllArchivedQueues(t, r, tc.archived)
|
||||||
|
|
||||||
recoverer := newRecoverer(recovererParams{
|
recoverer := newRecoverer(recovererParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
@@ -259,10 +259,10 @@ func TestRecoverer(t *testing.T) {
|
|||||||
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff)
|
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for qname, want := range tc.wantDead {
|
for qname, want := range tc.wantArchived {
|
||||||
gotDead := h.GetDeadMessages(t, r, qname)
|
gotDead := h.GetArchivedMessages(t, r, qname)
|
||||||
if diff := cmp.Diff(want, gotDead, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(want, gotDead, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.DeadKey(qname), diff)
|
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
scheduler.go (255 lines changed)
@@ -5,69 +5,250 @@
|
|||||||
package asynq
|
package asynq
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-redis/redis/v7"
|
||||||
|
"github.com/google/uuid"
|
||||||
"github.com/hibiken/asynq/internal/base"
|
"github.com/hibiken/asynq/internal/base"
|
||||||
"github.com/hibiken/asynq/internal/log"
|
"github.com/hibiken/asynq/internal/log"
|
||||||
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
|
"github.com/robfig/cron/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
type scheduler struct {
|
// A Scheduler kicks off tasks at regular intervals based on the user defined schedule.
|
||||||
|
type Scheduler struct {
|
||||||
|
id string
|
||||||
|
status *base.ServerStatus
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
broker base.Broker
|
client *Client
|
||||||
|
rdb *rdb.RDB
|
||||||
// channel to communicate back to the long running "scheduler" goroutine.
|
cron *cron.Cron
|
||||||
|
location *time.Location
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
|
wg sync.WaitGroup
|
||||||
// list of queue names to check and enqueue.
|
errHandler func(task *Task, opts []Option, err error)
|
||||||
queues []string
|
|
||||||
|
|
||||||
// poll interval on average
|
|
||||||
avgInterval time.Duration
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type schedulerParams struct {
|
// NewScheduler returns a new Scheduler instance given the redis connection option.
|
||||||
logger *log.Logger
|
// The parameter opts is optional; defaults will be used if opts is set to nil.
|
||||||
broker base.Broker
|
func NewScheduler(r RedisConnOpt, opts *SchedulerOpts) *Scheduler {
|
||||||
queues []string
|
c, ok := r.MakeRedisClient().(redis.UniversalClient)
|
||||||
interval time.Duration
|
if !ok {
|
||||||
|
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
|
||||||
|
}
|
||||||
|
if opts == nil {
|
||||||
|
opts = &SchedulerOpts{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newScheduler(params schedulerParams) *scheduler {
|
logger := log.NewLogger(opts.Logger)
|
||||||
return &scheduler{
|
loglevel := opts.LogLevel
|
||||||
logger: params.logger,
|
if loglevel == level_unspecified {
|
||||||
broker: params.broker,
|
loglevel = InfoLevel
|
||||||
|
}
|
||||||
|
logger.SetLevel(toInternalLogLevel(loglevel))
|
||||||
|
|
||||||
|
loc := opts.Location
|
||||||
|
if loc == nil {
|
||||||
|
loc = time.UTC
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Scheduler{
|
||||||
|
id: generateSchedulerID(),
|
||||||
|
status: base.NewServerStatus(base.StatusIdle),
|
||||||
|
logger: logger,
|
||||||
|
client: NewClient(r),
|
||||||
|
rdb: rdb.NewRDB(c),
|
||||||
|
cron: cron.New(cron.WithLocation(loc)),
|
||||||
|
location: loc,
|
||||||
done: make(chan struct{}),
|
done: make(chan struct{}),
|
||||||
queues: params.queues,
|
errHandler: opts.EnqueueErrorHandler,
|
||||||
avgInterval: params.interval,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *scheduler) terminate() {
|
func generateSchedulerID() string {
|
||||||
s.logger.Debug("Scheduler shutting down...")
|
host, err := os.Hostname()
|
||||||
// Signal the scheduler goroutine to stop polling.
|
if err != nil {
|
||||||
s.done <- struct{}{}
|
host = "unknown-host"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s:%d:%v", host, os.Getpid(), uuid.New())
|
||||||
}
|
}
|
||||||
|
|
||||||
// start starts the "scheduler" goroutine.
|
// SchedulerOpts specifies scheduler options.
|
||||||
func (s *scheduler) start(wg *sync.WaitGroup) {
|
type SchedulerOpts struct {
|
||||||
wg.Add(1)
|
// Logger specifies the logger used by the scheduler instance.
|
||||||
go func() {
|
//
|
||||||
defer wg.Done()
|
// If unset, the default logger is used.
|
||||||
|
Logger Logger
|
||||||
|
|
||||||
|
// LogLevel specifies the minimum log level to enable.
|
||||||
|
//
|
||||||
|
// If unset, InfoLevel is used by default.
|
||||||
|
LogLevel LogLevel
|
||||||
|
|
||||||
|
// Location specifies the time zone location.
|
||||||
|
//
|
||||||
|
// If unset, the UTC time zone (time.UTC) is used.
|
||||||
|
Location *time.Location
|
||||||
|
|
||||||
|
// EnqueueErrorHandler gets called when scheduler cannot enqueue a registered task
|
||||||
|
// due to an error.
|
||||||
|
EnqueueErrorHandler func(task *Task, opts []Option, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// enqueueJob encapsulates the job of enqueuing a task and recording the event.
|
||||||
|
type enqueueJob struct {
|
||||||
|
id uuid.UUID
|
||||||
|
cronspec string
|
||||||
|
task *Task
|
||||||
|
opts []Option
|
||||||
|
location *time.Location
|
||||||
|
logger *log.Logger
|
||||||
|
client *Client
|
||||||
|
rdb *rdb.RDB
|
||||||
|
errHandler func(task *Task, opts []Option, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *enqueueJob) Run() {
|
||||||
|
res, err := j.client.Enqueue(j.task, j.opts...)
|
||||||
|
if err != nil {
|
||||||
|
j.logger.Errorf("scheduler could not enqueue a task %+v: %v", j.task, err)
|
||||||
|
if j.errHandler != nil {
|
||||||
|
j.errHandler(j.task, j.opts, err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
j.logger.Debugf("scheduler enqueued a task: %+v", res)
|
||||||
|
event := &base.SchedulerEnqueueEvent{
|
||||||
|
TaskID: res.ID,
|
||||||
|
EnqueuedAt: res.EnqueuedAt.In(j.location),
|
||||||
|
}
|
||||||
|
err = j.rdb.RecordSchedulerEnqueueEvent(j.id.String(), event)
|
||||||
|
if err != nil {
|
||||||
|
j.logger.Errorf("scheduler could not record enqueue event of enqueued task %+v: %v", j.task, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register registers a task to be enqueued on the given schedule specified by the cronspec.
|
||||||
|
// It returns an ID of the newly registered entry.
|
||||||
|
func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entryID string, err error) {
|
||||||
|
job := &enqueueJob{
|
||||||
|
id: uuid.New(),
|
||||||
|
cronspec: cronspec,
|
||||||
|
task: task,
|
||||||
|
opts: opts,
|
||||||
|
location: s.location,
|
||||||
|
client: s.client,
|
||||||
|
rdb: s.rdb,
|
||||||
|
logger: s.logger,
|
||||||
|
errHandler: s.errHandler,
|
||||||
|
}
|
||||||
|
if _, err = s.cron.AddJob(cronspec, job); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return job.id.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run starts the scheduler until an os signal to exit the program is received.
|
||||||
|
// It returns an error if scheduler is already running or has been stopped.
|
||||||
|
func (s *Scheduler) Run() error {
|
||||||
|
if err := s.Start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.waitForSignals()
|
||||||
|
return s.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start starts the scheduler.
|
||||||
|
// It returns an error if the scheduler is already running or has been stopped.
|
||||||
|
func (s *Scheduler) Start() error {
|
||||||
|
switch s.status.Get() {
|
||||||
|
case base.StatusRunning:
|
||||||
|
return fmt.Errorf("asynq: the scheduler is already running")
|
||||||
|
case base.StatusStopped:
|
||||||
|
return fmt.Errorf("asynq: the scheduler has already been stopped")
|
||||||
|
}
|
||||||
|
s.logger.Info("Scheduler starting")
|
||||||
|
s.logger.Infof("Scheduler timezone is set to %v", s.location)
|
||||||
|
s.cron.Start()
|
||||||
|
s.wg.Add(1)
|
||||||
|
go s.runHeartbeater()
|
||||||
|
s.status.Set(base.StatusRunning)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop stops the scheduler.
|
||||||
|
// It returns an error if the scheduler is not currently running.
|
||||||
|
func (s *Scheduler) Stop() error {
|
||||||
|
if s.status.Get() != base.StatusRunning {
|
||||||
|
return fmt.Errorf("asynq: the scheduler is not running")
|
||||||
|
}
|
||||||
|
s.logger.Info("Scheduler shutting down")
|
||||||
|
close(s.done) // signal heartbeater to stop
|
||||||
|
ctx := s.cron.Stop()
|
||||||
|
<-ctx.Done()
|
||||||
|
s.wg.Wait()
|
||||||
|
|
||||||
|
s.clearHistory()
|
||||||
|
s.client.Close()
|
||||||
|
s.rdb.Close()
|
||||||
|
s.status.Set(base.StatusStopped)
|
||||||
|
s.logger.Info("Scheduler stopped")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Scheduler) runHeartbeater() {
|
||||||
|
defer s.wg.Done()
|
||||||
|
ticker := time.NewTicker(5 * time.Second)
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-s.done:
|
case <-s.done:
|
||||||
s.logger.Debug("Scheduler done")
|
s.logger.Debugf("Scheduler heartbeater shutting down")
|
||||||
|
s.rdb.ClearSchedulerEntries(s.id)
|
||||||
return
|
return
|
||||||
case <-time.After(s.avgInterval):
|
case <-ticker.C:
|
||||||
s.exec()
|
s.beat()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *scheduler) exec() {
|
// beat writes a snapshot of entries to redis.
|
||||||
if err := s.broker.CheckAndEnqueue(s.queues...); err != nil {
|
func (s *Scheduler) beat() {
|
||||||
s.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
|
var entries []*base.SchedulerEntry
|
||||||
|
for _, entry := range s.cron.Entries() {
|
||||||
|
job := entry.Job.(*enqueueJob)
|
||||||
|
e := &base.SchedulerEntry{
|
||||||
|
ID: job.id.String(),
|
||||||
|
Spec: job.cronspec,
|
||||||
|
Type: job.task.Type,
|
||||||
|
Payload: job.task.Payload.data,
|
||||||
|
Opts: stringifyOptions(job.opts),
|
||||||
|
Next: entry.Next,
|
||||||
|
Prev: entry.Prev,
|
||||||
|
}
|
||||||
|
entries = append(entries, e)
|
||||||
|
}
|
||||||
|
s.logger.Debugf("Writing entries %v", entries)
|
||||||
|
if err := s.rdb.WriteSchedulerEntries(s.id, entries, 5*time.Second); err != nil {
|
||||||
|
s.logger.Warnf("Scheduler could not write heartbeat data: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringifyOptions(opts []Option) []string {
|
||||||
|
var res []string
|
||||||
|
for _, opt := range opts {
|
||||||
|
res = append(res, opt.String())
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Scheduler) clearHistory() {
|
||||||
|
for _, entry := range s.cron.Entries() {
|
||||||
|
job := entry.Job.(*enqueueJob)
|
||||||
|
if err := s.rdb.ClearSchedulerHistory(job.id.String()); err != nil {
|
||||||
|
s.logger.Warnf("Could not clear scheduler history for entry %q: %v", job.id.String(), err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
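Editor's note: taken together, the exported Scheduler is driven with NewScheduler, Register and Run, all of which appear in the new scheduler.go above. A hedged end-to-end sketch (the Redis address, cronspec, queue name and task type are illustrative; Queue and MaxRetry are existing enqueue options):

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		&asynq.SchedulerOpts{
			Location: time.UTC, // cron entries are interpreted in this zone
			EnqueueErrorHandler: func(task *asynq.Task, opts []asynq.Option, err error) {
				log.Printf("could not enqueue %q: %v", task.Type, err)
			},
		},
	)

	// Enqueue a digest task every morning at 7am into the "periodic" queue.
	entryID, err := scheduler.Register("0 7 * * *",
		asynq.NewTask("email:digest", nil),
		asynq.Queue("periodic"), asynq.MaxRetry(3))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("registered entry %s", entryID)

	// Run blocks until an exit signal is received, then stops cleanly.
	if err := scheduler.Run(); err != nil {
		log.Fatal(err)
	}
}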
@@ -10,128 +10,109 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
h "github.com/hibiken/asynq/internal/asynqtest"
|
"github.com/hibiken/asynq/internal/asynqtest"
|
||||||
"github.com/hibiken/asynq/internal/base"
|
"github.com/hibiken/asynq/internal/base"
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestScheduler(t *testing.T) {
|
func TestScheduler(t *testing.T) {
|
||||||
r := setup(t)
|
|
||||||
defer r.Close()
|
|
||||||
rdbClient := rdb.NewRDB(r)
|
|
||||||
const pollInterval = time.Second
|
|
||||||
s := newScheduler(schedulerParams{
|
|
||||||
logger: testLogger,
|
|
||||||
broker: rdbClient,
|
|
||||||
queues: []string{"default", "critical"},
|
|
||||||
interval: pollInterval,
|
|
||||||
})
|
|
||||||
t1 := h.NewTaskMessageWithQueue("gen_thumbnail", nil, "default")
|
|
||||||
t2 := h.NewTaskMessageWithQueue("send_email", nil, "critical")
|
|
||||||
t3 := h.NewTaskMessageWithQueue("reindex", nil, "default")
|
|
||||||
t4 := h.NewTaskMessageWithQueue("sync", nil, "critical")
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
initScheduled map[string][]base.Z // scheduled queue initial state
|
cronspec string
|
||||||
initRetry map[string][]base.Z // retry queue initial state
|
task *Task
|
||||||
initPending map[string][]*base.TaskMessage // default queue initial state
|
opts []Option
|
||||||
wait time.Duration // wait duration before checking for final state
|
wait time.Duration
|
||||||
wantScheduled map[string][]*base.TaskMessage // schedule queue final state
|
queue string
|
||||||
wantRetry map[string][]*base.TaskMessage // retry queue final state
|
want []*base.TaskMessage
|
||||||
wantPending map[string][]*base.TaskMessage // default queue final state
|
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
initScheduled: map[string][]base.Z{
|
cronspec: "@every 3s",
|
||||||
"default": {{Message: t1, Score: now.Add(time.Hour).Unix()}},
|
task: NewTask("task1", nil),
|
||||||
"critical": {{Message: t2, Score: now.Add(-2 * time.Second).Unix()}},
|
opts: []Option{MaxRetry(10)},
|
||||||
},
|
wait: 10 * time.Second,
|
||||||
initRetry: map[string][]base.Z{
|
queue: "default",
|
||||||
"default": {{Message: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()}},
|
want: []*base.TaskMessage{
|
||||||
"critical": {},
|
{
|
||||||
},
|
Type: "task1",
|
||||||
initPending: map[string][]*base.TaskMessage{
|
Payload: nil,
|
||||||
"default": {},
|
Retry: 10,
|
||||||
"critical": {t4},
|
Timeout: int64(defaultTimeout.Seconds()),
|
||||||
},
|
Queue: "default",
|
||||||
wait: pollInterval * 2,
|
|
||||||
wantScheduled: map[string][]*base.TaskMessage{
|
|
||||||
"default": {t1},
|
|
||||||
"critical": {},
|
|
||||||
},
|
|
||||||
wantRetry: map[string][]*base.TaskMessage{
|
|
||||||
"default": {},
|
|
||||||
"critical": {},
|
|
||||||
},
|
|
||||||
wantPending: map[string][]*base.TaskMessage{
|
|
||||||
"default": {t3},
|
|
||||||
"critical": {t2, t4},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
initScheduled: map[string][]base.Z{
|
Type: "task1",
|
||||||
"default": {
|
Payload: nil,
|
||||||
{Message: t1, Score: now.Unix()},
|
Retry: 10,
|
||||||
{Message: t3, Score: now.Add(-500 * time.Millisecond).Unix()},
|
Timeout: int64(defaultTimeout.Seconds()),
|
||||||
|
Queue: "default",
|
||||||
},
|
},
|
||||||
"critical": {
|
{
|
||||||
{Message: t2, Score: now.Add(-2 * time.Second).Unix()},
|
Type: "task1",
|
||||||
|
Payload: nil,
|
||||||
|
Retry: 10,
|
||||||
|
Timeout: int64(defaultTimeout.Seconds()),
|
||||||
|
Queue: "default",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
initRetry: map[string][]base.Z{
|
|
||||||
"default": {},
|
|
||||||
"critical": {},
|
|
||||||
},
|
|
||||||
initPending: map[string][]*base.TaskMessage{
|
|
||||||
"default": {},
|
|
||||||
"critical": {t4},
|
|
||||||
},
|
|
||||||
wait: pollInterval * 2,
|
|
||||||
wantScheduled: map[string][]*base.TaskMessage{
|
|
||||||
"default": {},
|
|
||||||
"critical": {},
|
|
||||||
},
|
|
||||||
wantRetry: map[string][]*base.TaskMessage{
|
|
||||||
"default": {},
|
|
||||||
"critical": {},
|
|
||||||
},
|
|
||||||
wantPending: map[string][]*base.TaskMessage{
|
|
||||||
"default": {t1, t3},
|
|
||||||
"critical": {t2, t4},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
r := setup(t)
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
h.FlushDB(t, r) // clean up db before each test case.
|
scheduler := NewScheduler(getRedisConnOpt(t), nil)
|
||||||
h.SeedAllScheduledQueues(t, r, tc.initScheduled) // initialize scheduled queue
|
if _, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...); err != nil {
|
||||||
h.SeedAllRetryQueues(t, r, tc.initRetry) // initialize retry queue
|
t.Fatal(err)
|
||||||
h.SeedAllPendingQueues(t, r, tc.initPending) // initialize default queue
|
}
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
if err := scheduler.Start(); err != nil {
|
||||||
s.start(&wg)
|
t.Fatal(err)
|
||||||
|
}
|
||||||
time.Sleep(tc.wait)
|
time.Sleep(tc.wait)
|
||||||
s.terminate()
|
if err := scheduler.Stop(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
for qname, want := range tc.wantScheduled {
|
got := asynqtest.GetPendingMessages(t, r, tc.queue)
|
||||||
gotScheduled := h.GetScheduledMessages(t, r, qname)
|
if diff := cmp.Diff(tc.want, got, asynqtest.IgnoreIDOpt); diff != "" {
|
||||||
if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
|
t.Errorf("mismatch found in queue %q: (-want,+got)\n%s", tc.queue, diff)
|
||||||
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.ScheduledKey(qname), diff)
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for qname, want := range tc.wantRetry {
|
func TestSchedulerWhenRedisDown(t *testing.T) {
|
||||||
gotRetry := h.GetRetryMessages(t, r, qname)
|
var (
|
||||||
if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" {
|
mu sync.Mutex
|
||||||
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.RetryKey(qname), diff)
|
counter int
|
||||||
}
|
)
|
||||||
|
errorHandler := func(task *Task, opts []Option, err error) {
|
||||||
|
mu.Lock()
|
||||||
|
counter++
|
||||||
|
mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
for qname, want := range tc.wantPending {
|
// Connect to non-existent redis instance to simulate a redis server being down.
|
||||||
gotPending := h.GetPendingMessages(t, r, qname)
|
scheduler := NewScheduler(
|
||||||
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
RedisClientOpt{Addr: ":9876"},
|
||||||
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.QueueKey(qname), diff)
|
&SchedulerOpts{EnqueueErrorHandler: errorHandler},
|
||||||
|
)
|
||||||
|
|
||||||
|
task := NewTask("test", nil)
|
||||||
|
|
||||||
|
if _, err := scheduler.Register("@every 3s", task); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := scheduler.Start(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
// Scheduler should attempt to enqueue the task three times (every 3s).
|
||||||
|
time.Sleep(10 * time.Second)
|
||||||
|
if err := scheduler.Stop(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mu.Lock()
|
||||||
|
if counter != 3 {
|
||||||
|
t.Errorf("EnqueueErrorHandler was called %d times, want 3", counter)
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
}
|
}
|
||||||
|
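The added TestSchedulerWhenRedisDown exercises the new EnqueueErrorHandler hook on SchedulerOpts. Below is a minimal sketch of how an application might wire it up; the Redis address, cron spec, and task type are placeholders, not values taken from this diff.

    package main

    import (
    	"log"
    	"os"
    	"os/signal"

    	"github.com/hibiken/asynq"
    )

    func main() {
    	scheduler := asynq.NewScheduler(
    		asynq.RedisClientOpt{Addr: "localhost:6379"}, // placeholder address
    		&asynq.SchedulerOpts{
    			// Called whenever the scheduler fails to enqueue a registered task,
    			// e.g. while Redis is unreachable.
    			EnqueueErrorHandler: func(task *asynq.Task, opts []asynq.Option, err error) {
    				log.Printf("could not enqueue %q: %v", task.Type, err)
    			},
    		},
    	)
    	if _, err := scheduler.Register("@every 30s", asynq.NewTask("example_task", nil)); err != nil {
    		log.Fatal(err)
    	}
    	if err := scheduler.Start(); err != nil {
    		log.Fatal(err)
    	}
    	// Block until interrupted, then stop the scheduler cleanly.
    	quit := make(chan os.Signal, 1)
    	signal.Notify(quit, os.Interrupt)
    	<-quit
    	if err := scheduler.Stop(); err != nil {
    		log.Fatal(err)
    	}
    }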
server.go (66 lines changed)
@@ -15,23 +15,23 @@ import (
 	"sync"
 	"time"
 
+	"github.com/go-redis/redis/v7"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/log"
 	"github.com/hibiken/asynq/internal/rdb"
 )
 
-// Server is responsible for managing the background-task processing.
+// Server is responsible for managing the task processing.
 //
 // Server pulls tasks off queues and processes them.
-// If the processing of a task is unsuccessful, server will
-// schedule it for a retry.
+// If the processing of a task is unsuccessful, server will schedule it for a retry.
 // A task will be retried until either the task gets processed successfully
 // or until it reaches its max retry count.
 //
-// If a task exhausts its retries, it will be moved to the "dead" queue and
-// will be kept in the queue for some time until a certain condition is met
-// (e.g., queue size reaches a certain limit, or the task has been in the
-// queue for a certain amount of time).
+// If a task exhausts its retries, it will be moved to the archive and
+// will be kept in the archive for some time until a certain condition is met
+// (e.g., archive size reaches a certain limit, or the task has been in the
+// archive for a certain amount of time).
 type Server struct {
 	logger *log.Logger
 
@@ -41,7 +41,7 @@ type Server struct {
 
 	// wait group to wait for all goroutines to finish.
 	wg          sync.WaitGroup
-	scheduler   *scheduler
+	forwarder   *forwarder
 	processor   *processor
 	syncer      *syncer
 	heartbeater *heartbeater
@@ -61,11 +61,7 @@ type Config struct {
 	// Function to calculate retry delay for a failed task.
 	//
 	// By default, it uses exponential backoff algorithm to calculate the delay.
-	//
-	// n is the number of times the task has been retried.
-	// e is the error returned by the task handler.
-	// t is the task in question.
-	RetryDelayFunc func(n int, e error, t *Task) time.Duration
+	RetryDelayFunc RetryDelayFunc
 
 	// List of queues to process with given priority value. Keys are the names of the
 	// queues and values are associated priority value.
@@ -75,11 +71,13 @@ type Config struct {
 	// Priority is treated as follows to avoid starving low priority queues.
 	//
 	// Example:
+	//
 	//     Queues: map[string]int{
 	//         "critical": 6,
 	//         "default":  3,
 	//         "low":      1,
 	//     }
+	//
 	// With the above config and given that all queues are not empty, the tasks
 	// in "critical", "default", "low" should be processed 60%, 30%, 10% of
 	// the time respectively.
@@ -99,7 +97,10 @@ type Config struct {
 	// HandleError is invoked only if the task handler returns a non-nil error.
 	//
 	// Example:
-	//     func reportError(task *asynq.Task, err error, retried, maxRetry int) {
+	//
+	//     func reportError(ctx context, task *asynq.Task, err error) {
+	//         retried, _ := asynq.GetRetryCount(ctx)
+	//         maxRetry, _ := asynq.GetMaxRetry(ctx)
 	//         if retried >= maxRetry {
 	//             err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
 	//         }
@@ -149,6 +150,14 @@ func (fn ErrorHandlerFunc) HandleError(ctx context.Context, task *Task, err erro
 	fn(ctx, task, err)
 }
 
+// RetryDelayFunc calculates the retry delay duration for a failed task given
+// the retry count, error, and the task.
+//
+// n is the number of times the task has been retried.
+// e is the error returned by the task handler.
+// t is the task in question.
+type RetryDelayFunc func(n int, e error, t *Task) time.Duration
+
 // Logger supports logging at various log levels.
 type Logger interface {
 	// Debug logs a message at Debug level.
@@ -249,9 +258,11 @@ func toInternalLogLevel(l LogLevel) log.Level {
 	panic(fmt.Sprintf("asynq: unexpected log level: %v", l))
 }
 
-// Formula taken from https://github.com/mperham/sidekiq.
-func defaultDelayFunc(n int, e error, t *Task) time.Duration {
+// DefaultRetryDelayFunc is the default RetryDelayFunc used if one is not specified in Config.
+// It uses exponential back-off strategy to calculate the retry delay.
+func DefaultRetryDelayFunc(n int, e error, t *Task) time.Duration {
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+	// Formula taken from https://github.com/mperham/sidekiq.
 	s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
 	return time.Duration(s) * time.Second
 }
@@ -269,13 +280,17 @@ const (
 // NewServer returns a new Server given a redis connection option
 // and background processing configuration.
 func NewServer(r RedisConnOpt, cfg Config) *Server {
+	c, ok := r.MakeRedisClient().(redis.UniversalClient)
+	if !ok {
+		panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
+	}
 	n := cfg.Concurrency
 	if n < 1 {
 		n = runtime.NumCPU()
 	}
 	delayFunc := cfg.RetryDelayFunc
 	if delayFunc == nil {
-		delayFunc = defaultDelayFunc
+		delayFunc = DefaultRetryDelayFunc
 	}
 	queues := make(map[string]int)
 	for qname, p := range cfg.Queues {
@@ -287,7 +302,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 		queues = defaultQueueConfig
 	}
 	var qnames []string
-	for q, _ := range queues {
+	for q := range queues {
 		qnames = append(qnames, q)
 	}
 	shutdownTimeout := cfg.ShutdownTimeout
@@ -305,8 +320,8 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 	}
 	logger.SetLevel(toInternalLogLevel(loglevel))
 
-	rdb := rdb.NewRDB(createRedisClient(r))
-	starting := make(chan *base.TaskMessage)
+	rdb := rdb.NewRDB(c)
+	starting := make(chan *workerInfo)
 	finished := make(chan *base.TaskMessage)
 	syncCh := make(chan *syncRequest)
 	status := base.NewServerStatus(base.StatusIdle)
@@ -328,7 +343,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 		starting: starting,
 		finished: finished,
 	})
-	scheduler := newScheduler(schedulerParams{
+	forwarder := newForwarder(forwarderParams{
 		logger: logger,
 		broker: rdb,
 		queues: qnames,
@@ -370,7 +385,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 		logger:      logger,
 		broker:      rdb,
 		status:      status,
-		scheduler:   scheduler,
+		forwarder:   forwarder,
 		processor:   processor,
 		syncer:      syncer,
 		heartbeater: heartbeater,
@@ -387,6 +402,9 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 //
 // If ProcessTask return a non-nil error or panics, the task
 // will be retried after delay.
+// One exception to this rule is when ProcessTask returns SkipRetry error.
+// If the returned error is SkipRetry or the error wraps SkipRetry, retry is
+// skipped and task will be archived instead.
 type Handler interface {
 	ProcessTask(context.Context, *Task) error
 }
@@ -448,7 +466,7 @@ func (srv *Server) Start(handler Handler) error {
 	srv.subscriber.start(&srv.wg)
 	srv.syncer.start(&srv.wg)
 	srv.recoverer.start(&srv.wg)
-	srv.scheduler.start(&srv.wg)
+	srv.forwarder.start(&srv.wg)
 	srv.processor.start(&srv.wg)
 	return nil
 }
@@ -469,7 +487,7 @@ func (srv *Server) Stop() {
 	// Sender goroutines should be terminated before the receiver goroutines.
 	// processor -> syncer (via syncCh)
 	// processor -> heartbeater (via starting, finished channels)
-	srv.scheduler.terminate()
+	srv.forwarder.terminate()
 	srv.processor.terminate()
 	srv.recoverer.terminate()
 	srv.syncer.terminate()
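Pulling together the pieces documented in the server.go changes above, here is a sketch of a Config that uses the weighted queues, the new named RetryDelayFunc type, the context-based ErrorHandler example, and a handler that opts out of retries by wrapping SkipRetry. The Redis address, queue weights, delays, and task validation are illustrative assumptions, not values from this diff.

    package main

    import (
    	"context"
    	"fmt"
    	"log"
    	"time"

    	"github.com/hibiken/asynq"
    )

    func main() {
    	srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "localhost:6379"}, asynq.Config{
    		Concurrency: 10,
    		// With weights 6/3/1, "critical", "default" and "low" are processed
    		// roughly 60%, 30% and 10% of the time, as the doc comment describes.
    		Queues: map[string]int{"critical": 6, "default": 3, "low": 1},
    		// RetryDelayFunc is now the named type asynq.RetryDelayFunc; a plain
    		// function literal with the same signature still satisfies it.
    		RetryDelayFunc: func(n int, e error, t *asynq.Task) time.Duration {
    			return time.Duration(n) * time.Minute
    		},
    		ErrorHandler: asynq.ErrorHandlerFunc(func(ctx context.Context, task *asynq.Task, err error) {
    			retried, _ := asynq.GetRetryCount(ctx)
    			maxRetry, _ := asynq.GetMaxRetry(ctx)
    			if retried >= maxRetry {
    				err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
    			}
    			log.Print(err)
    		}),
    	})

    	// Wrapping SkipRetry makes the server archive the task instead of retrying it.
    	handler := asynq.HandlerFunc(func(ctx context.Context, t *asynq.Task) error {
    		// Real payload validation elided in this sketch.
    		return fmt.Errorf("malformed payload for %q: %w", t.Type, asynq.SkipRetry)
    	})

    	if err := srv.Start(handler); err != nil {
    		log.Fatal(err)
    	}
    	defer srv.Stop()
    	select {} // block; a real program would wait for SIGTERM/SIGINT instead
    }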
@@ -127,7 +127,7 @@ func TestServerWithRedisDown(t *testing.T) {
 	testBroker := testbroker.NewTestBroker(r)
 	srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
 	srv.broker = testBroker
-	srv.scheduler.broker = testBroker
+	srv.forwarder.broker = testBroker
 	srv.heartbeater.broker = testBroker
 	srv.processor.broker = testBroker
 	srv.subscriber.broker = testBroker
@@ -160,7 +160,7 @@ func TestServerWithFlakyBroker(t *testing.T) {
 	redisConnOpt := getRedisConnOpt(t)
 	srv := NewServer(redisConnOpt, Config{LogLevel: testLogLevel})
 	srv.broker = testBroker
-	srv.scheduler.broker = testBroker
+	srv.forwarder.broker = testBroker
 	srv.heartbeater.broker = testBroker
 	srv.processor.broker = testBroker
 	srv.subscriber.broker = testBroker
@@ -28,3 +28,10 @@ func (srv *Server) waitForSignals() {
 		break
 	}
 }
 
+func (s *Scheduler) waitForSignals() {
+	s.logger.Info("Send signal TERM or INT to stop the scheduler")
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, unix.SIGTERM, unix.SIGINT)
+	<-sigs
+}
@@ -20,3 +20,10 @@ func (srv *Server) waitForSignals() {
 	signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
 	<-sigs
 }
 
+func (s *Scheduler) waitForSignals() {
+	s.logger.Info("Send signal TERM or INT to stop the scheduler")
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
+	<-sigs
+}
@@ -24,7 +24,7 @@ To view details on any command, use `asynq help <command> <subcommand>`.
 
 - `asynq stats`
 - `asynq queue [ls inspect history rm pause unpause]`
-- `asynq task [ls cancel delete kill run delete-all kill-all run-all]`
+- `asynq task [ls cancel delete archive run delete-all archive-all run-all]`
 - `asynq server [ls]`
 
 ### Global flags
tools/asynq/cmd/cron.go (new file, 129 lines)
@@ -0,0 +1,129 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package cmd

import (
	"fmt"
	"io"
	"os"
	"sort"
	"time"

	"github.com/hibiken/asynq/inspeq"
	"github.com/spf13/cobra"
)

func init() {
	rootCmd.AddCommand(cronCmd)
	cronCmd.AddCommand(cronListCmd)
	cronCmd.AddCommand(cronHistoryCmd)
	cronHistoryCmd.Flags().Int("page", 1, "page number")
	cronHistoryCmd.Flags().Int("size", 30, "page size")
}

var cronCmd = &cobra.Command{
	Use:   "cron",
	Short: "Manage cron",
}

var cronListCmd = &cobra.Command{
	Use:   "ls",
	Short: "List cron entries",
	Run:   cronList,
}

var cronHistoryCmd = &cobra.Command{
	Use:   "history [ENTRY_ID...]",
	Short: "Show history of each cron tasks",
	Args:  cobra.MinimumNArgs(1),
	Run:   cronHistory,
}

func cronList(cmd *cobra.Command, args []string) {
	inspector := createInspector()

	entries, err := inspector.SchedulerEntries()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(entries) == 0 {
		fmt.Println("No scheduler entries")
		return
	}

	// Sort entries by spec.
	sort.Slice(entries, func(i, j int) bool {
		x, y := entries[i], entries[j]
		return x.Spec < y.Spec
	})

	cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
	printRows := func(w io.Writer, tmpl string) {
		for _, e := range entries {
			fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type, e.Task.Payload, e.Opts,
				nextEnqueue(e.Next), prevEnqueue(e.Prev))
		}
	}
	printTable(cols, printRows)
}

// Returns a string describing when the next enqueue will happen.
func nextEnqueue(nextEnqueueAt time.Time) string {
	d := nextEnqueueAt.Sub(time.Now()).Round(time.Second)
	if d < 0 {
		return "Now"
	}
	return fmt.Sprintf("In %v", d)
}

// Returns a string describing when the previous enqueue was.
func prevEnqueue(prevEnqueuedAt time.Time) string {
	if prevEnqueuedAt.IsZero() {
		return "N/A"
	}
	return fmt.Sprintf("%v ago", time.Since(prevEnqueuedAt).Round(time.Second))
}

func cronHistory(cmd *cobra.Command, args []string) {
	pageNum, err := cmd.Flags().GetInt("page")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	pageSize, err := cmd.Flags().GetInt("size")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	inspector := createInspector()
	for i, entryID := range args {
		if i > 0 {
			fmt.Printf("\n%s\n", separator)
		}
		fmt.Println()

		fmt.Printf("Entry: %s\n\n", entryID)

		events, err := inspector.ListSchedulerEnqueueEvents(
			entryID, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
		if err != nil {
			fmt.Printf("error: %v\n", err)
			continue
		}
		if len(events) == 0 {
			fmt.Printf("No scheduler enqueue events found for entry: %s\n", entryID)
			continue
		}

		cols := []string{"TaskID", "EnqueuedAt"}
		printRows := func(w io.Writer, tmpl string) {
			for _, e := range events {
				fmt.Fprintf(w, tmpl, e.TaskID, e.EnqueuedAt)
			}
		}
		printTable(cols, printRows)
	}
}
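The cron subcommands above are thin wrappers over the inspector. A minimal sketch of calling the same inspeq APIs directly follows; the Redis address is a placeholder, and it assumes the entry ID printed by `cron ls` is the string accepted by ListSchedulerEnqueueEvents.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hibiken/asynq"
    	"github.com/hibiken/asynq/inspeq"
    )

    func main() {
    	// Same calls that `asynq cron ls` and `asynq cron history` make.
    	inspector := inspeq.New(asynq.RedisClientOpt{Addr: "localhost:6379"}) // placeholder address

    	entries, err := inspector.SchedulerEntries()
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, e := range entries {
    		fmt.Printf("%s  %s  %s\n", e.ID, e.Spec, e.Task.Type)

    		// Enqueue history for this entry, first page of 10 events.
    		events, err := inspector.ListSchedulerEnqueueEvents(e.ID, inspeq.PageSize(10), inspeq.Page(1))
    		if err != nil {
    			log.Fatal(err)
    		}
    		for _, ev := range events {
    			fmt.Printf("  enqueued %s at %v\n", ev.TaskID, ev.EnqueuedAt)
    		}
    	}
    }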
@@ -98,7 +98,9 @@ func migrate(cmd *cobra.Command, args []string) {
 		printError(err)
 		os.Exit(1)
 	}
-	if err := partitionZSetMembersByQueue(c, "asynq:dead", base.DeadKey); err != nil {
+	// Note: base.DeadKey function was renamed in v0.14. We define the legacy function here since we need it for this migration script.
+	deadKeyFunc := func(qname string) string { return fmt.Sprintf("asynq:{%s}:dead", qname) }
+	if err := partitionZSetMembersByQueue(c, "asynq:dead", deadKeyFunc); err != nil {
 		printError(err)
 		os.Exit(1)
 	}
@@ -113,7 +115,7 @@ func migrate(cmd *cobra.Command, args []string) {
 
 	paused, err := c.SMembers("asynq:paused").Result()
 	if err != nil {
-		printError(fmt.Errorf("command SMEMBERS asynq:paused failed: ", err))
+		printError(fmt.Errorf("command SMEMBERS asynq:paused failed: %v", err))
 		os.Exit(1)
 	}
 	for _, qkey := range paused {
@@ -136,6 +138,27 @@ func migrate(cmd *cobra.Command, args []string) {
 		printError(err)
 		os.Exit(1)
 	}
+
+	/*** Migrate from 0.13 to 0.14 compatible ***/
+
+	// Move all dead tasks to archived ZSET.
+	for _, qname := range allQueues {
+		zs, err := c.ZRangeWithScores(deadKeyFunc(qname), 0, -1).Result()
+		if err != nil {
+			printError(err)
+			os.Exit(1)
+		}
+		for _, z := range zs {
+			if err := c.ZAdd(base.ArchivedKey(qname), &z).Err(); err != nil {
+				printError(err)
+				os.Exit(1)
+			}
+		}
+		if err := deleteKey(c, deadKeyFunc(qname)); err != nil {
+			printError(err)
+			os.Exit(1)
+		}
+	}
 }
 
 func backupKey(key string) string {
@@ -10,7 +10,7 @@ import (
 	"os"
 
 	"github.com/fatih/color"
-	"github.com/hibiken/asynq"
+	"github.com/hibiken/asynq/inspeq"
 	"github.com/hibiken/asynq/internal/rdb"
 	"github.com/spf13/cobra"
 )
@@ -82,7 +82,7 @@ func queueList(cmd *cobra.Command, args []string) {
 	type queueInfo struct {
 		name    string
 		keyslot int64
-		nodes   []asynq.ClusterNode
+		nodes   []inspeq.ClusterNode
 	}
 	inspector := createInspector()
 	queues, err := inspector.Queues()
@@ -141,7 +141,7 @@ func queueInspect(cmd *cobra.Command, args []string) {
 	}
 }
 
-func printQueueStats(s *asynq.QueueStats) {
+func printQueueStats(s *inspeq.QueueStats) {
 	bold := color.New(color.Bold)
 	bold.Println("Queue Info")
 	fmt.Printf("Name: %s\n", s.Queue)
@@ -149,9 +149,9 @@ func printQueueStats(s *asynq.QueueStats) {
 	fmt.Printf("Paused: %t\n\n", s.Paused)
 	bold.Println("Task Count by State")
 	printTable(
-		[]string{"active", "pending", "scheduled", "retry", "dead"},
+		[]string{"active", "pending", "scheduled", "retry", "archived"},
 		func(w io.Writer, tmpl string) {
-			fmt.Fprintf(w, tmpl, s.Active, s.Pending, s.Scheduled, s.Retry, s.Dead)
+			fmt.Fprintf(w, tmpl, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived)
 		},
 	)
 	fmt.Println()
@@ -191,7 +191,7 @@ func queueHistory(cmd *cobra.Command, args []string) {
 	}
 }
 
-func printDailyStats(stats []*asynq.DailyStats) {
+func printDailyStats(stats []*inspeq.DailyStats) {
 	printTable(
 		[]string{"date (UTC)", "processed", "failed", "error rate"},
 		func(w io.Writer, tmpl string) {
@@ -5,6 +5,7 @@
 package cmd
 
 import (
+	"crypto/tls"
 	"fmt"
 	"io"
 	"os"
@@ -13,6 +14,7 @@ import (
 
 	"github.com/go-redis/redis/v7"
 	"github.com/hibiken/asynq"
+	"github.com/hibiken/asynq/inspeq"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/rdb"
 	"github.com/spf13/cobra"
@@ -31,6 +33,7 @@ var (
 
 	useRedisCluster bool
 	clusterAddrs    string
+	tlsServerName   string
 )
 
 // rootCmd represents the base command when called without any subcommands
@@ -74,12 +77,15 @@ func init() {
 	rootCmd.PersistentFlags().StringVar(&clusterAddrs, "cluster_addrs",
 		"127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003,127.0.0.1:7004,127.0.0.1:7005",
 		"list of comma-separated redis server addresses")
+	rootCmd.PersistentFlags().StringVar(&tlsServerName, "tls_server",
+		"", "server name for TLS validation")
 	// Bind flags with config.
 	viper.BindPFlag("uri", rootCmd.PersistentFlags().Lookup("uri"))
 	viper.BindPFlag("db", rootCmd.PersistentFlags().Lookup("db"))
 	viper.BindPFlag("password", rootCmd.PersistentFlags().Lookup("password"))
 	viper.BindPFlag("cluster", rootCmd.PersistentFlags().Lookup("cluster"))
 	viper.BindPFlag("cluster_addrs", rootCmd.PersistentFlags().Lookup("cluster_addrs"))
+	viper.BindPFlag("tls_server", rootCmd.PersistentFlags().Lookup("tls_server"))
 }
 
 // initConfig reads in config file and ENV variables if set.
@@ -116,34 +122,46 @@ func createRDB() *rdb.RDB {
 		c = redis.NewClusterClient(&redis.ClusterOptions{
 			Addrs:     addrs,
 			Password:  viper.GetString("password"),
+			TLSConfig: getTLSConfig(),
 		})
 	} else {
 		c = redis.NewClient(&redis.Options{
 			Addr:      viper.GetString("uri"),
 			DB:        viper.GetInt("db"),
 			Password:  viper.GetString("password"),
+			TLSConfig: getTLSConfig(),
 		})
 	}
 	return rdb.NewRDB(c)
}
 
 // createRDB creates a Inspector instance using flag values and returns it.
-func createInspector() *asynq.Inspector {
+func createInspector() *inspeq.Inspector {
 	var connOpt asynq.RedisConnOpt
 	if useRedisCluster {
 		addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
 		connOpt = asynq.RedisClusterClientOpt{
 			Addrs:     addrs,
 			Password:  viper.GetString("password"),
+			TLSConfig: getTLSConfig(),
 		}
 	} else {
 		connOpt = asynq.RedisClientOpt{
 			Addr:      viper.GetString("uri"),
 			DB:        viper.GetInt("db"),
 			Password:  viper.GetString("password"),
+			TLSConfig: getTLSConfig(),
 		}
 	}
-	return asynq.NewInspector(connOpt)
+	return inspeq.New(connOpt)
+}
+
+func getTLSConfig() *tls.Config {
+	tlsServer := viper.GetString("tls_server")
+	if tlsServer == "" {
+		return nil
+	}
+	return &tls.Config{ServerName: tlsServer}
 }
 
 // printTable is a helper function to print data in table format.
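The new --tls_server flag only sets tls.Config{ServerName: ...} on the Redis connection options. A sketch of doing the same thing programmatically when constructing a client follows; the host names are placeholders and the certificate pool assumption (system roots) is mine, not stated in the diff.

    package main

    import (
    	"crypto/tls"

    	"github.com/hibiken/asynq"
    )

    func main() {
    	redisOpt := asynq.RedisClientOpt{
    		Addr: "redis.example.com:6379", // placeholder host
    		// Equivalent of passing --tls_server=redis.example.com to the CLI:
    		// only ServerName is set; certificates come from the system pool.
    		TLSConfig: &tls.Config{ServerName: "redis.example.com"},
    	}

    	client := asynq.NewClient(redisOpt)
    	defer client.Close()
    	// Enqueue tasks with client.Enqueue(...) as usual.
    }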
@@ -57,7 +57,7 @@ type AggregateStats struct {
 	Pending   int
 	Scheduled int
 	Retry     int
-	Dead      int
+	Archived  int
 	Processed int
 	Failed    int
 	Timestamp time.Time
@@ -84,7 +84,7 @@ func stats(cmd *cobra.Command, args []string) {
 		aggStats.Pending += s.Pending
 		aggStats.Scheduled += s.Scheduled
 		aggStats.Retry += s.Retry
-		aggStats.Dead += s.Dead
+		aggStats.Archived += s.Archived
 		aggStats.Processed += s.Processed
 		aggStats.Failed += s.Failed
 		aggStats.Timestamp = s.Timestamp
@@ -126,9 +126,9 @@ func stats(cmd *cobra.Command, args []string) {
 func printStatsByState(s *AggregateStats) {
 	format := strings.Repeat("%v\t", 5) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
-	fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "dead")
+	fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived")
 	fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----")
-	fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Dead)
+	fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived)
 	tw.Flush()
 }
@@ -10,7 +10,7 @@ import (
 	"os"
 	"time"
 
-	"github.com/hibiken/asynq"
+	"github.com/hibiken/asynq/inspeq"
 	"github.com/spf13/cobra"
 )
 
@@ -26,11 +26,11 @@ func init() {
 
 	taskCmd.AddCommand(taskCancelCmd)
 
-	taskCmd.AddCommand(taskKillCmd)
-	taskKillCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
-	taskKillCmd.Flags().StringP("key", "k", "", "key of the task")
-	taskKillCmd.MarkFlagRequired("queue")
-	taskKillCmd.MarkFlagRequired("key")
+	taskCmd.AddCommand(taskArchiveCmd)
+	taskArchiveCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
+	taskArchiveCmd.Flags().StringP("key", "k", "", "key of the task")
+	taskArchiveCmd.MarkFlagRequired("queue")
+	taskArchiveCmd.MarkFlagRequired("key")
 
 	taskCmd.AddCommand(taskDeleteCmd)
 	taskDeleteCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
@@ -44,11 +44,11 @@ func init() {
 	taskRunCmd.MarkFlagRequired("queue")
 	taskRunCmd.MarkFlagRequired("key")
 
-	taskCmd.AddCommand(taskKillAllCmd)
-	taskKillAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
-	taskKillAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
-	taskKillAllCmd.MarkFlagRequired("queue")
-	taskKillAllCmd.MarkFlagRequired("state")
+	taskCmd.AddCommand(taskArchiveAllCmd)
+	taskArchiveAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
+	taskArchiveAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
+	taskArchiveAllCmd.MarkFlagRequired("queue")
+	taskArchiveAllCmd.MarkFlagRequired("state")
 
 	taskCmd.AddCommand(taskDeleteAllCmd)
 	taskDeleteAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
@@ -78,7 +78,7 @@ The value for the state flag should be one of:
 - pending
 - scheduled
 - retry
-- dead
+- archived
 
 List opeartion paginates the result set.
 By default, the command fetches the first 30 tasks.
@@ -100,11 +100,11 @@ var taskCancelCmd = &cobra.Command{
 	Run: taskCancel,
 }
 
-var taskKillCmd = &cobra.Command{
-	Use:   "kill --queue=QUEUE --key=KEY",
-	Short: "Kill a task with the given key",
+var taskArchiveCmd = &cobra.Command{
+	Use:   "archive --queue=QUEUE --key=KEY",
+	Short: "Archive a task with the given key",
 	Args:  cobra.NoArgs,
-	Run:   taskKill,
+	Run:   taskArchive,
 }
 
 var taskDeleteCmd = &cobra.Command{
@@ -121,11 +121,11 @@ var taskRunCmd = &cobra.Command{
 	Run: taskRun,
 }
 
-var taskKillAllCmd = &cobra.Command{
-	Use:   "kill-all --queue=QUEUE --state=STATE",
-	Short: "Kill all tasks in the given state",
+var taskArchiveAllCmd = &cobra.Command{
	Use:   "archive-all --queue=QUEUE --state=STATE",
+	Short: "Archive all tasks in the given state",
 	Args:  cobra.NoArgs,
-	Run:   taskKillAll,
+	Run:   taskArchiveAll,
 }
 
 var taskDeleteAllCmd = &cobra.Command{
@@ -173,8 +173,8 @@ func taskList(cmd *cobra.Command, args []string) {
 		listScheduledTasks(qname, pageNum, pageSize)
 	case "retry":
 		listRetryTasks(qname, pageNum, pageSize)
-	case "dead":
-		listDeadTasks(qname, pageNum, pageSize)
+	case "archived":
+		listArchivedTasks(qname, pageNum, pageSize)
 	default:
 		fmt.Printf("error: state=%q is not supported\n", state)
 		os.Exit(1)
@@ -183,7 +183,7 @@ func taskList(cmd *cobra.Command, args []string) {
 
 func listActiveTasks(qname string, pageNum, pageSize int) {
 	i := createInspector()
-	tasks, err := i.ListActiveTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
+	tasks, err := i.ListActiveTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
 	if err != nil {
 		fmt.Println(err)
 		os.Exit(1)
@@ -204,7 +204,7 @@ func listActiveTasks(qname string, pageNum, pageSize int) {
 
 func listPendingTasks(qname string, pageNum, pageSize int) {
 	i := createInspector()
-	tasks, err := i.ListPendingTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
+	tasks, err := i.ListPendingTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
 	if err != nil {
 		fmt.Println(err)
 		os.Exit(1)
@@ -214,10 +214,10 @@ func listPendingTasks(qname string, pageNum, pageSize int) {
 		return
 	}
 	printTable(
-		[]string{"ID", "Type", "Payload"},
+		[]string{"Key", "Type", "Payload"},
 		func(w io.Writer, tmpl string) {
 			for _, t := range tasks {
-				fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload)
+				fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload)
 			}
 		},
 	)
@@ -225,7 +225,7 @@ func listPendingTasks(qname string, pageNum, pageSize int) {
 
 func listScheduledTasks(qname string, pageNum, pageSize int) {
 	i := createInspector()
-	tasks, err := i.ListScheduledTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
+	tasks, err := i.ListScheduledTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
 	if err != nil {
 		fmt.Println(err)
 		os.Exit(1)
@@ -248,7 +248,7 @@ func listScheduledTasks(qname string, pageNum, pageSize int) {
 
 func listRetryTasks(qname string, pageNum, pageSize int) {
 	i := createInspector()
-	tasks, err := i.ListRetryTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
+	tasks, err := i.ListRetryTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
 	if err != nil {
 		fmt.Println(err)
 		os.Exit(1)
@@ -267,28 +267,28 @@ func listRetryTasks(qname string, pageNum, pageSize int) {
 			} else {
 				nextRetry = "right now"
 			}
-			fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, nextRetry, t.ErrorMsg, t.Retried, t.MaxRetry)
+			fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, nextRetry, t.LastError, t.Retried, t.MaxRetry)
 		}
 	},
 	)
 }
 
-func listDeadTasks(qname string, pageNum, pageSize int) {
+func listArchivedTasks(qname string, pageNum, pageSize int) {
 	i := createInspector()
-	tasks, err := i.ListDeadTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
+	tasks, err := i.ListArchivedTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
 	if err != nil {
 		fmt.Println(err)
 		os.Exit(1)
 	}
 	if len(tasks) == 0 {
-		fmt.Printf("No dead tasks in %q queue\n", qname)
+		fmt.Printf("No archived tasks in %q queue\n", qname)
 		return
 	}
 	printTable(
 		[]string{"Key", "Type", "Payload", "Last Failed", "Last Error"},
 		func(w io.Writer, tmpl string) {
 			for _, t := range tasks {
-				fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, t.LastFailedAt, t.ErrorMsg)
+				fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, t.LastFailedAt, t.LastError)
 			}
 		})
 }
@@ -305,7 +305,7 @@ func taskCancel(cmd *cobra.Command, args []string) {
 	}
 }
 
-func taskKill(cmd *cobra.Command, args []string) {
+func taskArchive(cmd *cobra.Command, args []string) {
 	qname, err := cmd.Flags().GetString("queue")
 	if err != nil {
 		fmt.Printf("error: %v\n", err)
@@ -318,12 +318,12 @@ func taskKill(cmd *cobra.Command, args []string) {
 	}
 
 	i := createInspector()
-	err = i.KillTaskByKey(qname, key)
+	err = i.ArchiveTaskByKey(qname, key)
 	if err != nil {
 		fmt.Printf("error: %v\n", err)
 		os.Exit(1)
 	}
-	fmt.Println("task transitioned to dead state")
+	fmt.Println("task archived")
 }
 
 func taskDelete(cmd *cobra.Command, args []string) {
@@ -365,10 +365,10 @@ func taskRun(cmd *cobra.Command, args []string) {
 	fmt.Printf("error: %v\n", err)
 	os.Exit(1)
 	}
-	fmt.Println("task transitioned to pending state")
+	fmt.Println("task is now pending")
 }
 
-func taskKillAll(cmd *cobra.Command, args []string) {
+func taskArchiveAll(cmd *cobra.Command, args []string) {
 	qname, err := cmd.Flags().GetString("queue")
 	if err != nil {
 		fmt.Printf("error: %v\n", err)
@@ -383,10 +383,12 @@ func taskKillAll(cmd *cobra.Command, args []string) {
 	i := createInspector()
 	var n int
 	switch state {
+	case "pending":
+		n, err = i.ArchiveAllPendingTasks(qname)
 	case "scheduled":
-		n, err = i.KillAllScheduledTasks(qname)
+		n, err = i.ArchiveAllScheduledTasks(qname)
 	case "retry":
-		n, err = i.KillAllRetryTasks(qname)
+		n, err = i.ArchiveAllRetryTasks(qname)
 	default:
 		fmt.Printf("error: unsupported state %q\n", state)
 		os.Exit(1)
@@ -395,7 +397,7 @@ func taskKillAll(cmd *cobra.Command, args []string) {
 	fmt.Printf("error: %v\n", err)
 	os.Exit(1)
 	}
-	fmt.Printf("%d tasks transitioned to dead state\n", n)
+	fmt.Printf("%d tasks archived\n", n)
 }
 
 func taskDeleteAll(cmd *cobra.Command, args []string) {
@@ -413,12 +415,14 @@ func taskDeleteAll(cmd *cobra.Command, args []string) {
 	i := createInspector()
 	var n int
 	switch state {
+	case "pending":
+		n, err = i.DeleteAllPendingTasks(qname)
 	case "scheduled":
 		n, err = i.DeleteAllScheduledTasks(qname)
 	case "retry":
 		n, err = i.DeleteAllRetryTasks(qname)
-	case "dead":
-		n, err = i.DeleteAllDeadTasks(qname)
+	case "archived":
+		n, err = i.DeleteAllArchivedTasks(qname)
 	default:
 		fmt.Printf("error: unsupported state %q\n", state)
 		os.Exit(1)
@@ -449,8 +453,8 @@ func taskRunAll(cmd *cobra.Command, args []string) {
 		n, err = i.RunAllScheduledTasks(qname)
 	case "retry":
 		n, err = i.RunAllRetryTasks(qname)
-	case "dead":
-		n, err = i.RunAllDeadTasks(qname)
+	case "archived":
+		n, err = i.RunAllArchivedTasks(qname)
 	default:
 		fmt.Printf("error: unsupported state %q\n", state)
 		os.Exit(1)
@@ -459,5 +463,5 @@ func taskRunAll(cmd *cobra.Command, args []string) {
 	fmt.Printf("error: %v\n", err)
 	os.Exit(1)
 	}
-	fmt.Printf("%d tasks transitioned to pending state\n", n)
+	fmt.Printf("%d tasks are now pending\n", n)
 }
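Since the Inspector now lives in the inspeq package and "kill" became "archive", the programmatic equivalents of the renamed CLI commands look roughly like the sketch below; the Redis address, queue name, and task key are placeholders.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hibiken/asynq"
    	"github.com/hibiken/asynq/inspeq"
    )

    func main() {
    	i := inspeq.New(asynq.RedisClientOpt{Addr: "localhost:6379"}) // placeholder address

    	// Equivalent of: asynq task archive --queue=default --key=KEY
    	if err := i.ArchiveTaskByKey("default", "KEY"); err != nil { // "KEY" is a placeholder task key
    		log.Fatal(err)
    	}

    	// Equivalent of: asynq task archive-all --queue=default --state=retry
    	n, err := i.ArchiveAllRetryTasks("default")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%d tasks archived\n", n)

    	// Equivalent of: asynq task ls --queue=default --state=archived
    	tasks, err := i.ListArchivedTasks("default", inspeq.PageSize(30), inspeq.Page(1))
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, t := range tasks {
    		fmt.Println(t.Key(), t.Type, t.LastError)
    	}
    }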
@@ -3,17 +3,20 @@ module github.com/hibiken/asynq/tools
 go 1.13
 
 require (
+	github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect
 	github.com/coreos/go-etcd v2.0.0+incompatible // indirect
 	github.com/cpuguy83/go-md2man v1.0.10 // indirect
 	github.com/fatih/color v1.9.0
 	github.com/go-redis/redis/v7 v7.4.0
 	github.com/google/uuid v1.1.1
-	github.com/hibiken/asynq v0.4.0
+	github.com/hibiken/asynq v0.14.0
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/spf13/cast v1.3.1
-	github.com/spf13/cobra v1.0.0
-	github.com/spf13/viper v1.6.2
+	github.com/spf13/cobra v1.1.1
+	github.com/spf13/viper v1.7.0
+	github.com/ugorji/go v1.1.4 // indirect
 	github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect
+	github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 // indirect
 )
 
 replace github.com/hibiken/asynq => ./..
165
tools/go.sum
165
tools/go.sum
@@ -1,31 +1,54 @@
|
|||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||||
|
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||||
|
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||||
|
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||||
|
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||||
|
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||||
|
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||||
|
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||||
|
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||||
|
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||||
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||||
|
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||||
|
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
|
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||||
|
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
|
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
|
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||||
|
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||||
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
||||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
@@ -39,29 +62,61 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -77,16 +132,27 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
 github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -94,11 +160,14 @@ github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -108,9 +177,14 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
@@ -128,17 +202,25 @@ github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
 github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
+github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
 github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
 github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -147,60 +229,137 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
 golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
@@ -214,4 +373,10 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
 gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=