Mirror of https://github.com/hibiken/asynq.git (synced 2025-10-24 10:36:12 +08:00)
Compare commits
285 Commits
285 commits (first listed: 05534c6f24, last listed: a38f628f3b).
.github/workflows/benchstat.yml (vendored, new file, 82 lines)
@@ -0,0 +1,82 @@
# This workflow runs benchmarks against the current branch,
# compares them to benchmarks against master,
# and uploads the results as an artifact.

name: benchstat

on: [pull_request]

jobs:
  incoming:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Benchmark
        run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... | tee -a new.txt
      - name: Upload Benchmark
        uses: actions/upload-artifact@v2
        with:
          name: bench-incoming
          path: new.txt

  current:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          ref: master
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Benchmark
        run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... | tee -a old.txt
      - name: Upload Benchmark
        uses: actions/upload-artifact@v2
        with:
          name: bench-current
          path: old.txt

  benchstat:
    needs: [incoming, current]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Install benchstat
        run: go get -u golang.org/x/perf/cmd/benchstat
      - name: Download Incoming
        uses: actions/download-artifact@v2
        with:
          name: bench-incoming
      - name: Download Current
        uses: actions/download-artifact@v2
        with:
          name: bench-current
      - name: Benchstat Results
        run: benchstat old.txt new.txt | tee -a benchstat.txt
      - name: Upload benchstat results
        uses: actions/upload-artifact@v2
        with:
          name: benchstat
          path: benchstat.txt
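For context on what these workflow steps operate on: `go test -bench=.` collects every `Benchmark*` function in the module, and the final benchstat step compares the per-benchmark rows in `old.txt` and `new.txt`. The snippet below is a minimal, hypothetical benchmark of that shape (the task type and payload are made up for illustration); it is not part of the diff.

```go
package asynq_test

import (
	"testing"

	"github.com/hibiken/asynq"
)

// BenchmarkNewTask is a hypothetical benchmark: any function of this shape is
// picked up by the `go test -run=^$ -bench=. -count=5` steps above, and its
// rows in old.txt/new.txt are what the benchstat job compares.
func BenchmarkNewTask(b *testing.B) {
	payload := []byte(`{"user_id": 42}`)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = asynq.NewTask("email:deliver", payload)
	}
}
```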
.github/workflows/build.yml (vendored, new file, 35 lines)
@@ -0,0 +1,35 @@
name: build

on: [push, pull_request]

jobs:
  build:
    strategy:
      matrix:
        os: [ubuntu-latest]
        go-version: [1.13.x, 1.14.x, 1.15.x, 1.16.x]
    runs-on: ${{ matrix.os }}
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - uses: actions/checkout@v2

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}

      - name: Build
        run: go build -v ./...

      - name: Test
        run: go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...

      - name: Benchmark Test
        run: go test -run=^$ -bench=. -loglevel=debug ./...

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
.gitignore (vendored, 3 lines added)
@@ -19,3 +19,6 @@
 # Ignore asynq config file
 .asynq.*
+
+# Ignore editor config files
+.vscode
.travis.yml (deleted, 12 lines)
@@ -1,12 +0,0 @@
-language: go
-go_import_path: github.com/hibiken/asynq
-git:
-  depth: 1
-go: [1.13.x, 1.14.x]
-script:
-  - go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
-services:
-  - redis-server
-after_success:
-  - bash ./.travis/benchcmp.sh
-  - bash <(curl -s https://codecov.io/bash)
.travis/benchcmp.sh (deleted, 15 lines; the script referenced as `./.travis/benchcmp.sh` above)
@@ -1,15 +0,0 @@
-if [ "${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" != "master" ]; then
-    REMOTE_URL="$(git config --get remote.origin.url)";
-    cd ${TRAVIS_BUILD_DIR}/.. && \
-    git clone ${REMOTE_URL} "${TRAVIS_REPO_SLUG}-bench" && \
-    cd "${TRAVIS_REPO_SLUG}-bench" && \
-    # Benchmark master
-    git checkout master && \
-    go test -run=XXX -bench=. ./... > master.txt && \
-    # Benchmark feature branch
-    git checkout ${TRAVIS_COMMIT} && \
-    go test -run=XXX -bench=. ./... > feature.txt && \
-    go get -u golang.org/x/tools/cmd/benchcmp && \
-    # compare two benchmarks
-    benchcmp master.txt feature.txt;
-fi
CHANGELOG.md (263 lines changed)
@@ -7,6 +7,269 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

## [0.18.5] - 2021-09-01

### Added

- `IsFailure` config option is added to determine whether an error returned from the Handler counts as a failure.

## [0.18.4] - 2021-08-17

### Fixed

- Scheduler methods are now thread-safe. It's now safe to call `Register` and `Unregister` concurrently.

## [0.18.3] - 2021-08-09

### Changed

- `Client.Enqueue` no longer enqueues tasks with an empty typename; an error message is returned.

## [0.18.2] - 2021-07-15

### Changed

- Changed the `Queue` function to not convert the provided queue name to lowercase. Queue names are now case-sensitive.
- `QueueInfo.MemoryUsage` is now an approximate usage value.

### Fixed

- Fixed latency issue around memory usage (see https://github.com/hibiken/asynq/issues/309).

## [0.18.1] - 2021-07-04

### Changed

- Changed to execute task-recovering logic when the server starts up; previously it needed to wait a minute for the task-recovering logic to execute.

### Fixed

- Fixed task-recovering logic to execute every minute.

## [0.18.0] - 2021-06-29

### Changed

- The `NewTask` function now takes an array of bytes as the payload.
- Task `Type` and `Payload` should be accessed by a method call.
- The `Server` API has changed. Renamed `Quiet` to `Stop`. Renamed `Stop` to `Shutdown`. _Note:_ As a result of this renaming, the behavior of `Stop` has changed. Please update existing code to call `Shutdown` where it used to call `Stop`.
- The `Scheduler` API has changed. Renamed `Stop` to `Shutdown`.
- Requires redis v4.0+ for multiple field/value pair support.
- `Client.Enqueue` now returns `TaskInfo`.
- `Inspector.RunTaskByKey` is replaced with `Inspector.RunTask`.
- `Inspector.DeleteTaskByKey` is replaced with `Inspector.DeleteTask`.
- `Inspector.ArchiveTaskByKey` is replaced with `Inspector.ArchiveTask`.
- The `inspeq` package is removed. All types and functions from the package are moved to the `asynq` package.
- `WorkerInfo` field names have changed.
- `Inspector.CancelActiveTask` is renamed to `Inspector.CancelProcessing`.
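A minimal sketch of what the v0.18.0 changes above look like at a call site, assuming a `client` and `srv` created as in the README examples; the task type and payload are illustrative, not taken from the changelog.

```go
// Payloads are now raw bytes, and Type/Payload are accessed via methods.
task := asynq.NewTask("email:deliver", []byte(`{"user_id": 42}`))
log.Printf("type=%s payload=%s", task.Type(), task.Payload())

// Client.Enqueue now returns a TaskInfo describing the enqueued task.
info, err := client.Enqueue(task)
if err != nil {
	log.Fatalf("could not enqueue task: %v", err)
}
log.Printf("enqueued: id=%s queue=%s state=%s", info.ID, info.Queue, info.State)

// Server: what used to be Quiet is now Stop, and what used to be Stop is now Shutdown.
srv.Stop()     // stop pulling new tasks off the queues
srv.Shutdown() // wait for in-flight tasks, then shut down
```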
## [0.17.2] - 2021-06-06

### Fixed

- Free unique lock when task is deleted (https://github.com/hibiken/asynq/issues/275).

## [0.17.1] - 2021-04-04

### Fixed

- Fix bug in internal `RDB.memoryUsage` method.

## [0.17.0] - 2021-03-24

### Added

- `DialTimeout`, `ReadTimeout`, and `WriteTimeout` options are added to `RedisConnOpt`.

## [0.16.1] - 2021-03-20

### Fixed

- Replace `KEYS` command with `SCAN` as recommended by [redis doc](https://redis.io/commands/KEYS).

## [0.16.0] - 2021-03-10

### Added

- `Unregister` method is added to `Scheduler` to remove a registered entry.

## [0.15.0] - 2021-01-31

**IMPORTANT**: All `Inspector` related code is moved to the subpackage "github.com/hibiken/asynq/inspeq".

### Changed

- `Inspector` related code is moved to the subpackage "github.com/hibiken/asynq/inspeq".
- The `RedisConnOpt` interface has changed slightly. If you have been passing `RedisClientOpt`, `RedisFailoverClientOpt`, or `RedisClusterClientOpt` as a pointer,
  update your code to pass them as a value.
- The `ErrorMsg` field in `RetryTask` and `ArchivedTask` was renamed to `LastError`.

### Added

- `MaxRetry`, `Retried`, `LastError` fields were added to all task types returned from `Inspector`.
- `MemoryUsage` field was added to `QueueStats`.
- `DeleteAllPendingTasks` and `ArchiveAllPendingTasks` were added to `Inspector`.
- `DeleteTaskByKey` and `ArchiveTaskByKey` now support deleting/archiving `PendingTask`.
- The asynq CLI now supports deleting/archiving pending tasks.

## [0.14.1] - 2021-01-19

### Fixed

- `go.mod` file for CLI.

## [0.14.0] - 2021-01-14

**IMPORTANT**: Please run the `asynq migrate` command to migrate from the previous versions.

### Changed

- Renamed `DeadTask` to `ArchivedTask`.
- Renamed the operation `Kill` to `Archive` in `Inspector`.
- Print stack trace when Handler panics.
- Include a file name and a line number in the error message when recovering from a panic.

### Added

- `DefaultRetryDelayFunc` is now a public API, which can be used in a custom `RetryDelayFunc`.
- `SkipRetry` error is added to be used as a return value from `Handler`.
- `Servers` method is added to `Inspector`.
- `CancelActiveTask` method is added to `Inspector`.
- `ListSchedulerEnqueueEvents` method is added to `Inspector`.
- `SchedulerEntries` method is added to `Inspector`.
- `DeleteQueue` method is added to `Inspector`.

## [0.13.1] - 2020-11-22

### Fixed

- Fixed processor to wait for the specified time duration before forcefully shutting down workers.

## [0.13.0] - 2020-10-13

### Added

- `Scheduler` type is added to enable periodic tasks. See the godoc for its APIs and the [wiki](https://github.com/hibiken/asynq/wiki/Periodic-Tasks) for the getting-started guide.

### Changed

- The `Option` interface has changed. See the godoc for the new interface.
  This change has no impact as long as you are using exported functions (e.g. `MaxRetry`, `Queue`, etc.)
  to create `Option`s.

### Added

- `Payload.String() string` method is added.
- `Payload.MarshalJSON() ([]byte, error)` method is added.

## [0.12.0] - 2020-09-12

**IMPORTANT**: If you are upgrading from a previous version, please install the latest version of the CLI (`go get -u github.com/hibiken/asynq/tools/asynq`) and run the `asynq migrate` command. No process should be writing to Redis while you run the migration command.

## The semantics of queue have changed

Previously, we called tasks that are ready to be processed _"Enqueued tasks"_, and other tasks that are scheduled to be processed in the future _"Scheduled tasks"_, etc.
We changed the semantics of _"Enqueue"_ slightly; all tasks that the client pushes to Redis are _Enqueued_ to a queue. Within a queue, tasks transition from one state to another.
Possible task states are:

- `Pending`: task is ready to be processed (previously called "Enqueued")
- `Active`: task is currently being processed (previously called "InProgress")
- `Scheduled`: task is scheduled to be processed in the future
- `Retry`: task failed to be processed and will be retried again in the future
- `Dead`: task has exhausted all of its retries and is stored for manual inspection purposes

**This semantics change is reflected in the new `Inspector` API and CLI commands.**

---

### Changed

#### `Client`

Use the `ProcessIn` or `ProcessAt` option to schedule a task instead of `EnqueueIn` or `EnqueueAt`.

| Previously                  | v0.12.0                                    |
| --------------------------- | ------------------------------------------ |
| `client.EnqueueAt(t, task)` | `client.Enqueue(task, asynq.ProcessAt(t))` |
| `client.EnqueueIn(d, task)` | `client.Enqueue(task, asynq.ProcessIn(d))` |
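The same migration as the table above, written out; `client` and `task` are assumed from earlier examples, and error handling is elided.

```go
// Before v0.12.0:
//   client.EnqueueAt(time.Now().Add(24*time.Hour), task)
//   client.EnqueueIn(24*time.Hour, task)

// From v0.12.0 on, scheduling is just another Option passed to Enqueue:
client.Enqueue(task, asynq.ProcessAt(time.Now().Add(24*time.Hour)))
client.Enqueue(task, asynq.ProcessIn(24*time.Hour))
```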
#### `Inspector`

All Inspector methods are scoped to a queue, and the methods take `qname (string)` as the first argument.
`EnqueuedTask` is renamed to `PendingTask`, along with its corresponding methods.
`InProgressTask` is renamed to `ActiveTask`, along with its corresponding methods.
The verb "Enqueue" in method names is replaced by "Run" (e.g. `EnqueueAllScheduledTasks` --> `RunAllScheduledTasks`).

#### `CLI`

CLI commands are restructured to use subcommands. Commands are organized into a few management commands.
To view details on any command, use `asynq help <command> <subcommand>`.

- `asynq stats`
- `asynq queue [ls inspect history rm pause unpause]`
- `asynq task [ls cancel delete kill run delete-all kill-all run-all]`
- `asynq server [ls]`

### Added

#### `RedisConnOpt`

- `RedisClusterClientOpt` is added to connect to Redis Cluster.
- `Username` field is added to all `RedisConnOpt` types in order to authenticate the connection when Redis ACLs are used.

#### `Client`

- `ProcessIn(d time.Duration) Option` and `ProcessAt(t time.Time) Option` are added to replace the `EnqueueIn` and `EnqueueAt` functionality.

#### `Inspector`

- `Queues() ([]string, error)` method is added to get all queue names.
- `ClusterKeySlot(qname string) (int64, error)` method is added to get a queue's hash slot in a Redis cluster.
- `ClusterNodes(qname string) ([]ClusterNode, error)` method is added to get a list of Redis cluster nodes for the given queue.
- `Close() error` method is added to close the connection with Redis.

### `Handler`

- `GetQueueName(ctx context.Context) (string, bool)` helper is added to extract the queue name from a context.

## [0.11.0] - 2020-07-28

### Added

- `Inspector` type was added to monitor and mutate the state of queues and tasks.
- `HealthCheckFunc` and `HealthCheckInterval` fields were added to `Config` to allow the user to specify a callback
  function to check the broker connection.

## [0.10.0] - 2020-07-06

### Changed

- All tasks now require a timeout or deadline. By default, the timeout is set to 30 mins.
- Tasks that exceed their deadline are automatically retried.
- The encoding schema for task messages has changed. Please install the latest CLI and run the `migrate` command if
  you have tasks enqueued with the previous version of asynq.
- The API of `(*Client).Enqueue`, `(*Client).EnqueueIn`, and `(*Client).EnqueueAt` has changed to return a `*Result`.
- The API of `ErrorHandler` has changed. It now takes context as the first argument, and `retried` and `maxRetry` were removed from the argument list.
  Use `GetRetryCount` and/or `GetMaxRetry` to get the count values.

## [0.9.4] - 2020-06-13

### Fixed

- Fixes issue of the same tasks being processed by more than one worker (https://github.com/hibiken/asynq/issues/90).

## [0.9.3] - 2020-06-12

### Fixed

- Fixes the JSON number overflow issue (https://github.com/hibiken/asynq/issues/166).

## [0.9.2] - 2020-06-08

### Added

- The `pause` and `unpause` commands were added to the CLI. See the CLI's README for details.

## [0.9.1] - 2020-05-29

### Added
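A short sketch of the `GetQueueName` helper listed under the v0.12.0 `Handler` additions, shown with the v0.18-style `Task` accessors; the handler name and logging are illustrative.

```go
func HandleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error {
	// GetQueueName reports which queue this task was pulled from.
	if qname, ok := asynq.GetQueueName(ctx); ok {
		log.Printf("processing %q from queue %q", t.Type(), qname)
	}
	// ... task handling logic ...
	return nil
}
```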
CONTRIBUTING.md (1 line added)
@@ -45,6 +45,7 @@ Thank you! We'll try to respond as quickly as possible.
 6. Create a new pull request
 
 Please try to keep your pull request focused in scope and avoid including unrelated commits.
+Please run tests against redis cluster locally with `--redis_cluster` flag to ensure that code works for Redis cluster. TODO: Run tests using Redis cluster on CI.
 
 After you have submitted your pull request, we'll try to get back to you as soon as possible. We may suggest some changes or improvements.
Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))

proto: internal/proto/asynq.proto
	protoc -I=$(ROOT_DIR)/internal/proto \
		--go_out=$(ROOT_DIR)/internal/proto \
		--go_opt=module=github.com/hibiken/asynq/internal/proto \
		$(ROOT_DIR)/internal/proto/asynq.proto
README.md (247 lines changed)
@@ -1,56 +1,64 @@
-# Asynq
+<img src="https://user-images.githubusercontent.com/11155743/114697792-ffbfa580-9d26-11eb-8e5b-33bef69476dc.png" alt="Asynq logo" width="360px" />
 
+# Simple, reliable & efficient distributed task queue in Go
+
-[](https://travis-ci.com/hibiken/asynq)
-[](https://opensource.org/licenses/MIT)
-[](https://goreportcard.com/report/github.com/hibiken/asynq)
 [](https://godoc.org/github.com/hibiken/asynq)
+[](https://goreportcard.com/report/github.com/hibiken/asynq)
+
+[](https://opensource.org/licenses/MIT)
 [](https://gitter.im/go-asynq/community)
-[](https://codecov.io/gh/hibiken/asynq)
 
-## Overview
+Asynq is a Go library for queueing tasks and processing them asynchronously with workers. It's backed by [Redis](https://redis.io/) and is designed to be scalable yet easy to get started.
 
-Asynq is a Go library for queueing tasks and processing them in the background with workers. It is backed by Redis and it is designed to have a low barrier to entry. It should be integrated in your web stack easily.
 
 Highlevel overview of how Asynq works:
 
-- Client puts task on a queue
-- Server pulls task off queues and starts a worker goroutine for each task
+- Client puts tasks on a queue
+- Server pulls tasks off queues and starts a worker goroutine for each task
 - Tasks are processed concurrently by multiple workers
 
-Task queues are used as a mechanism to distribute work across multiple machines.
-A system can consist of multiple worker servers and brokers, giving way to high availability and horizontal scaling.
+Task queues are used as a mechanism to distribute work across multiple machines. A system can consist of multiple worker servers and brokers, giving way to high availability and horizontal scaling.
 
-
+**Example use case**
 
-## Stability and Compatibility
+
 
-**Important Note**: Current major version is zero (v0.x.x) to accomodate rapid development and fast iteration while getting early feedback from users (Feedback on APIs are appreciated!). The public API could change without a major version update before v1.0.0 release.
-
-**Status**: The library is currently undergoing heavy development with frequent, breaking API changes.
 
 ## Features
 
 - Guaranteed [at least one execution](https://www.cloudcomputingpatterns.org/at_least_once_delivery/) of a task
 - Scheduling of tasks
-- Durability since tasks are written to Redis
 - [Retries](https://github.com/hibiken/asynq/wiki/Task-Retry) of failed tasks
-- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#weighted-priority-queues)
-- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#strict-priority-queues)
+- Automatic recovery of tasks in the event of a worker crash
+- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#weighted-priority)
+- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#strict-priority)
 - Low latency to add a task since writes are fast in Redis
 - De-duplication of tasks using [unique option](https://github.com/hibiken/asynq/wiki/Unique-Tasks)
 - Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)
 - [Flexible handler interface with support for middlewares](https://github.com/hibiken/asynq/wiki/Handler-Deep-Dive)
-- [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for HA
+- [Ability to pause queue](/tools/asynq/README.md#pause) to stop processing tasks from the queue
+- [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks)
+- [Support Redis Cluster](https://github.com/hibiken/asynq/wiki/Redis-Cluster) for automatic sharding and high availability
+- [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for high availability
+- [Web UI](#web-ui) to inspect and remote-control queues and tasks
 - [CLI](#command-line-tool) to inspect and remote-control queues and tasks
 
+## Stability and Compatibility
+
+**Status**: The library is currently undergoing **heavy development** with frequent, breaking API changes.
+
+> ☝️ **Important Note**: Current major version is zero (`v0.x.x`) to accommodate rapid development and fast iteration while getting early feedback from users (_feedback on APIs is appreciated!_). The public API could change without a major version update before the `v1.0.0` release.
+
 ## Quickstart
 
-First, make sure you are running a Redis server locally.
+Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.13` or higher is required.
+
+Initialize your project by creating a folder and then running `go mod init github.com/your/repo` ([learn more](https://blog.golang.org/using-go-modules)) inside the folder. Then install the Asynq library with the [`go get`](https://golang.org/cmd/go/#hdr-Add_dependencies_to_current_module_and_install_them) command:
 
 ```sh
-$ redis-server
+go get -u github.com/hibiken/asynq
 ```
 
+Make sure you're running a Redis server locally or from a [Docker](https://hub.docker.com/_/redis) container. Version `4.0` or higher is required.
+
 Next, write a package that encapsulates task creation and task handling.
 
 ```go
@@ -64,23 +72,38 @@ import (
 
 // A list of task types.
 const (
-    EmailDelivery   = "email:deliver"
-    ImageProcessing = "image:process"
+    TypeEmailDelivery = "email:deliver"
+    TypeImageResize   = "image:resize"
 )
 
+type EmailDeliveryPayload struct {
+    UserID     int
+    TemplateID string
+}
+
+type ImageResizePayload struct {
+    SourceURL string
+}
+
 //----------------------------------------------
 // Write a function NewXXXTask to create a task.
 // A task consists of a type and a payload.
 //----------------------------------------------
 
-func NewEmailDeliveryTask(userID int, tmplID string) *asynq.Task {
-    payload := map[string]interface{}{"user_id": userID, "template_id": tmplID}
-    return asynq.NewTask(EmailDelivery, payload)
+func NewEmailDeliveryTask(userID int, tmplID string) (*asynq.Task, error) {
+    payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: tmplID})
+    if err != nil {
+        return nil, err
+    }
+    return asynq.NewTask(TypeEmailDelivery, payload), nil
 }
 
-func NewImageProcessingTask(src, dst string) *asynq.Task {
-    payload := map[string]interface{}{"src": src, "dst": dst}
-    return asynq.NewTask(ImageProcessing, payload)
+func NewImageResizeTask(src string) (*asynq.Task, error) {
+    payload, err := json.Marshal(ImageResizePayload{SourceURL: src})
+    if err != nil {
+        return nil, err
+    }
+    return asynq.NewTask(TypeImageResize, payload), nil
 }
 
 //---------------------------------------------------------------
@@ -92,51 +115,42 @@ func NewImageProcessingTask(src, dst string) *asynq.Task {
 //---------------------------------------------------------------
 
 func HandleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error {
-    userID, err := t.Payload.GetInt("user_id")
-    if err != nil {
-        return err
-    }
-    tmplID, err := t.Payload.GetString("template_id")
-    if err != nil {
-        return err
-    }
-    fmt.Printf("Send Email to User: user_id = %d, template_id = %s\n", userID, tmplID)
-    // Email delivery logic ...
+    var p EmailDeliveryPayload
+    if err := json.Unmarshal(t.Payload(), &p); err != nil {
+        return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
+    }
+    log.Printf("Sending Email to User: user_id=%d, template_id=%s", p.UserID, p.TemplateID)
+    // Email delivery code ...
     return nil
 }
 
 // ImageProcessor implements asynq.Handler interface.
-type ImageProcesser struct {
+type ImageProcessor struct {
     // ... fields for struct
 }
 
-func (p *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
-    src, err := t.Payload.GetString("src")
-    if err != nil {
-        return err
-    }
-    dst, err := t.Payload.GetString("dst")
-    if err != nil {
-        return err
-    }
-    fmt.Printf("Process image: src = %s, dst = %s\n", src, dst)
-    // Image processing logic ...
+func (processor *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
+    var p ImageResizePayload
+    if err := json.Unmarshal(t.Payload(), &p); err != nil {
+        return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
+    }
+    log.Printf("Resizing image: src=%s", p.SourceURL)
+    // Image resizing code ...
     return nil
 }
 
 func NewImageProcessor() *ImageProcessor {
-    // ... return an instance
+    return &ImageProcessor{}
 }
 ```
 
-In your web application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on the queue.
-A task will be processed asynchronously by a background worker as soon as the task gets enqueued.
-Scheduled tasks will be stored in Redis and will be enqueued at the specified time.
+In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on queues.
 
 ```go
 package main
 
 import (
+    "log"
     "time"
 
     "github.com/hibiken/asynq"
@@ -146,64 +160,70 @@ import (
 const redisAddr = "127.0.0.1:6379"
 
 func main() {
-    r := asynq.RedisClientOpt{Addr: redisAddr}
-    c := asynq.NewClient(r)
-    defer c.Close()
+    client := asynq.NewClient(asynq.RedisClientOpt{Addr: redisAddr})
+    defer client.Close()
 
     // ------------------------------------------------------
     // Example 1: Enqueue task to be processed immediately.
     //            Use (*Client).Enqueue method.
     // ------------------------------------------------------
 
-    t := tasks.NewEmailDeliveryTask(42, "some:template:id")
-    err := c.Enqueue(t)
+    task, err := tasks.NewEmailDeliveryTask(42, "some:template:id")
     if err != nil {
-        log.Fatal("could not enqueue task: %v", err)
+        log.Fatalf("could not create task: %v", err)
     }
+    info, err := client.Enqueue(task)
+    if err != nil {
+        log.Fatalf("could not enqueue task: %v", err)
+    }
+    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 
 
     // ------------------------------------------------------------
     // Example 2: Schedule task to be processed in the future.
-    //            Use (*Client).EnqueueIn or (*Client).EnqueueAt.
+    //            Use ProcessIn or ProcessAt option.
     // ------------------------------------------------------------
 
-    t = tasks.NewEmailDeliveryTask(42, "other:template:id")
-    err = c.EnqueueIn(24*time.Hour, t)
+    info, err = client.Enqueue(task, asynq.ProcessIn(24*time.Hour))
     if err != nil {
-        log.Fatal("could not schedule task: %v", err)
+        log.Fatalf("could not schedule task: %v", err)
    }
+    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 
 
     // ----------------------------------------------------------------------------
-    // Example 3: Set options to tune task processing behavior.
+    // Example 3: Set other options to tune task processing behavior.
     //            Options include MaxRetry, Queue, Timeout, Deadline, Unique etc.
     // ----------------------------------------------------------------------------
 
-    c.SetDefaultOptions(tasks.ImageProcessing, asynq.MaxRetry(10), asynq.Timeout(time.Minute))
+    client.SetDefaultOptions(tasks.TypeImageResize, asynq.MaxRetry(10), asynq.Timeout(3*time.Minute))
 
-    t = tasks.NewImageProcessingTask("some/blobstore/url", "other/blobstore/url")
-    err = c.Enqueue(t)
+    task, err = tasks.NewImageResizeTask("https://example.com/myassets/image.jpg")
     if err != nil {
-        log.Fatal("could not enqueue task: %v", err)
+        log.Fatalf("could not create task: %v", err)
    }
+    info, err = client.Enqueue(task)
+    if err != nil {
+        log.Fatalf("could not enqueue task: %v", err)
+    }
+    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 
     // ---------------------------------------------------------------------------
     // Example 4: Pass options to tune task processing behavior at enqueue time.
-    //            Options passed at enqueue time override default ones, if any.
+    //            Options passed at enqueue time override default ones.
     // ---------------------------------------------------------------------------
 
-    t = tasks.NewImageProcessingTask("some/blobstore/url", "other/blobstore/url")
-    err = c.Enqueue(t, asynq.Queue("critical"), asynq.Timeout(30*time.Second))
+    info, err = client.Enqueue(task, asynq.Queue("critical"), asynq.Timeout(30*time.Second))
     if err != nil {
-        log.Fatal("could not enqueue task: %v", err)
+        log.Fatalf("could not enqueue task: %v", err)
     }
+    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 }
 ```
 
-Next, create a worker server to process these tasks in the background.
-To start the background workers, use [`Server`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Server) and provide your [`Handler`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Handler) to process the tasks.
+Next, start a worker server to process these tasks in the background. To start the background workers, use [`Server`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Server) and provide your [`Handler`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Handler) to process the tasks.
 
-You can optionally use [`ServeMux`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#ServeMux) to create a handler, just as you would with [`"net/http"`](https://golang.org/pkg/net/http/) Handler.
+You can optionally use [`ServeMux`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#ServeMux) to create a handler, just as you would with a [`net/http`](https://golang.org/pkg/net/http/) Handler.
 
 ```go
 package main
@@ -218,9 +238,9 @@ import (
 const redisAddr = "127.0.0.1:6379"
 
 func main() {
-    r := asynq.RedisClientOpt{Addr: redisAddr}
-    srv := asynq.NewServer(r, asynq.Config{
+    srv := asynq.NewServer(
+        asynq.RedisClientOpt{Addr: redisAddr},
+        asynq.Config{
         // Specify how many concurrent workers to use
         Concurrency: 10,
         // Optionally specify multiple queues with different priority.
@@ -230,12 +250,13 @@ func main() {
             "low":      1,
         },
         // See the godoc for other configuration options
-    })
+        },
+    )
 
     // mux maps a type to a handler
     mux := asynq.NewServeMux()
-    mux.HandleFunc(tasks.EmailDelivery, tasks.HandleEmailDeliveryTask)
-    mux.Handle(tasks.ImageProcessing, tasks.NewImageProcessor())
+    mux.HandleFunc(tasks.TypeEmailDelivery, tasks.HandleEmailDeliveryTask)
+    mux.Handle(tasks.TypeImageResize, tasks.NewImageProcessor())
     // ...register other handlers...
 
     if err := srv.Run(mux); err != nil {
@@ -244,52 +265,52 @@ func main() {
 }
 ```
 
-For a more detailed walk-through of the library, see our [Getting Started Guide](https://github.com/hibiken/asynq/wiki/Getting-Started).
+For a more detailed walk-through of the library, see our [Getting Started](https://github.com/hibiken/asynq/wiki/Getting-Started) guide.
 
-To Learn more about `asynq` features and APIs, see our [Wiki](https://github.com/hibiken/asynq/wiki) and [godoc](https://godoc.org/github.com/hibiken/asynq).
+To learn more about `asynq` features and APIs, see the package [godoc](https://godoc.org/github.com/hibiken/asynq).
+
+## Web UI
+
+[Asynqmon](https://github.com/hibiken/asynqmon) is a web based tool for monitoring and administrating Asynq queues and tasks.
+
+Here's a few screenshots of the Web UI:
+
+**Queues view**
+
+
+
+**Tasks view**
+
+
+
+**Settings and adaptive dark mode**
+
+
+
+For details on how to use the tool, refer to the tool's [README](https://github.com/hibiken/asynqmon#readme).
 
 ## Command Line Tool
 
 Asynq ships with a command line tool to inspect the state of queues and tasks.
 
-Here's an example of running the `stats` command.
-
-
-
-For details on how to use the tool, refer to the tool's [README](/tools/asynq/README.md).
-
-## Installation
-
-To install `asynq` library, run the following command:
-
-```sh
-go get -u github.com/hibiken/asynq
-```
-
 To install the CLI tool, run the following command:
 
 ```sh
 go get -u github.com/hibiken/asynq/tools/asynq
 ```
 
-## Requirements
+Here's an example of running the `asynq stats` command:
 
-| Dependency                 | Version |
-| -------------------------- | ------- |
-| [Redis](https://redis.io/) | v2.8+   |
-| [Go](https://golang.org/)  | v1.13+  |
+
+
+For details on how to use the tool, refer to the tool's [README](/tools/asynq/README.md).
 
 ## Contributing
 
-We are open to, and grateful for, any contributions (Github issues/pull-requests, feedback on Gitter channel, etc) made by the community.
+We are open to, and grateful for, any contributions (GitHub issues/PRs, feedback on the [Gitter channel](https://gitter.im/go-asynq/community), etc.) made by the community.
 
 Please see the [Contribution Guide](/CONTRIBUTING.md) before contributing.
 
-## Acknowledgements
-
-- [Sidekiq](https://github.com/mperham/sidekiq) : Many of the design ideas are taken from sidekiq and its Web UI
-- [RQ](https://github.com/rq/rq) : Client APIs are inspired by rq library.
-- [Cobra](https://github.com/spf13/cobra) : Asynq CLI is built with cobra
-
 ## License
 
-Asynq is released under the MIT license. See [LICENSE](https://github.com/hibiken/asynq/blob/master/LICENSE).
+Copyright (c) 2019-present [Ken Hibino](https://github.com/hibiken) and [Contributors](https://github.com/hibiken/asynq/graphs/contributors). `Asynq` is free and open-source software licensed under the [MIT License](https://github.com/hibiken/asynq/blob/master/LICENSE). The official logo was created by [Vic Shóstak](https://github.com/koddr) and distributed under the [Creative Commons](https://creativecommons.org/publicdomain/zero/1.0/) license (CC0 1.0 Universal).
asynq.go (340 lines changed)
@@ -10,35 +10,163 @@ import (
     "net/url"
     "strconv"
     "strings"
+    "time"
 
     "github.com/go-redis/redis/v7"
+    "github.com/hibiken/asynq/internal/base"
 )
 
 // Task represents a unit of work to be performed.
 type Task struct {
-    // Type indicates the type of task to be performed.
-    Type string
+    // typename indicates the type of task to be performed.
+    typename string
 
-    // Payload holds data needed to perform the task.
-    Payload Payload
+    // payload holds data needed to perform the task.
+    payload []byte
 }
 
+func (t *Task) Type() string    { return t.typename }
+func (t *Task) Payload() []byte { return t.payload }
+
 // NewTask returns a new Task given a type name and payload data.
-//
-// The payload values must be serializable.
-func NewTask(typename string, payload map[string]interface{}) *Task {
+func NewTask(typename string, payload []byte) *Task {
     return &Task{
-        Type:    typename,
-        Payload: Payload{payload},
+        typename: typename,
+        payload:  payload,
     }
 }
 
+// A TaskInfo describes a task and its metadata.
+type TaskInfo struct {
+    // ID is the identifier of the task.
+    ID string
+
+    // Queue is the name of the queue in which the task belongs.
+    Queue string
+
+    // Type is the type name of the task.
+    Type string
+
+    // Payload is the payload data of the task.
+    Payload []byte
+
+    // State indicates the task state.
+    State TaskState
+
+    // MaxRetry is the maximum number of times the task can be retried.
+    MaxRetry int
+
+    // Retried is the number of times the task has retried so far.
+    Retried int
+
+    // LastErr is the error message from the last failure.
+    LastErr string
+
+    // LastFailedAt is the time of the last failure, if any.
+    // If the task has no failures, LastFailedAt is zero time (i.e. time.Time{}).
+    LastFailedAt time.Time
+
+    // Timeout is the duration the task can be processed by Handler before being retried,
+    // zero if not specified.
+    Timeout time.Duration
+
+    // Deadline is the deadline for the task, zero value if not specified.
+    Deadline time.Time
+
+    // NextProcessAt is the time the task is scheduled to be processed,
+    // zero if not applicable.
+    NextProcessAt time.Time
+}
+
+func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time) *TaskInfo {
+    info := TaskInfo{
+        ID:            msg.ID.String(),
+        Queue:         msg.Queue,
+        Type:          msg.Type,
+        Payload:       msg.Payload, // Do we need to make a copy?
+        MaxRetry:      msg.Retry,
+        Retried:       msg.Retried,
+        LastErr:       msg.ErrorMsg,
+        Timeout:       time.Duration(msg.Timeout) * time.Second,
+        NextProcessAt: nextProcessAt,
+    }
+    if msg.LastFailedAt == 0 {
+        info.LastFailedAt = time.Time{}
+    } else {
+        info.LastFailedAt = time.Unix(msg.LastFailedAt, 0)
+    }
+
+    if msg.Deadline == 0 {
+        info.Deadline = time.Time{}
+    } else {
+        info.Deadline = time.Unix(msg.Deadline, 0)
+    }
+
+    switch state {
+    case base.TaskStateActive:
+        info.State = TaskStateActive
+    case base.TaskStatePending:
+        info.State = TaskStatePending
+    case base.TaskStateScheduled:
+        info.State = TaskStateScheduled
+    case base.TaskStateRetry:
+        info.State = TaskStateRetry
+    case base.TaskStateArchived:
+        info.State = TaskStateArchived
+    default:
+        panic(fmt.Sprintf("internal error: unknown state: %d", state))
+    }
+    return &info
+}
+
+// TaskState denotes the state of a task.
+type TaskState int
+
+const (
+    // Indicates that the task is currently being processed by Handler.
+    TaskStateActive TaskState = iota + 1
+
+    // Indicates that the task is ready to be processed by Handler.
+    TaskStatePending
+
+    // Indicates that the task is scheduled to be processed some time in the future.
+    TaskStateScheduled
+
+    // Indicates that the task has previously failed and is scheduled to be processed some time in the future.
+    TaskStateRetry
+
+    // Indicates that the task is archived and stored for inspection purposes.
+    TaskStateArchived
+)
+
+func (s TaskState) String() string {
+    switch s {
+    case TaskStateActive:
+        return "active"
+    case TaskStatePending:
+        return "pending"
+    case TaskStateScheduled:
+        return "scheduled"
+    case TaskStateRetry:
+        return "retry"
+    case TaskStateArchived:
+        return "archived"
+    }
+    panic("asynq: unknown task state")
+}
+
 // RedisConnOpt is a discriminated union of types that represent Redis connection configuration option.
 //
 // RedisConnOpt represents a sum of following types:
 //
-//     RedisClientOpt | *RedisClientOpt | RedisFailoverClientOpt | *RedisFailoverClientOpt
-type RedisConnOpt interface{}
+//  - RedisClientOpt
+//  - RedisFailoverClientOpt
+//  - RedisClusterClientOpt
+type RedisConnOpt interface {
+    // MakeRedisClient returns a new redis client instance.
+    // Return value is intentionally opaque to hide the implementation detail of redis client.
+    MakeRedisClient() interface{}
+}
 
 // RedisClientOpt is used to create a redis client that connects
 // to a redis server directly.
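A small sketch tying the new types above together (client setup as in the README; the task type and payload are illustrative): `Client.Enqueue` returns a `*TaskInfo`, and its `State` field prints via the `TaskState.String` method.

```go
client := asynq.NewClient(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})
defer client.Close()

info, err := client.Enqueue(asynq.NewTask("email:deliver", []byte(`{"user_id": 42}`)))
if err != nil {
	log.Fatalf("could not enqueue task: %v", err)
}
// State is a TaskState; String() yields "pending", "scheduled", "retry", etc.
log.Printf("id=%s queue=%s state=%s retried=%d/%d",
	info.ID, info.Queue, info.State, info.Retried, info.MaxRetry)
```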
@@ -50,13 +178,38 @@ type RedisClientOpt struct {
     // Redis server address in "host:port" format.
     Addr string
 
-    // Redis server password.
+    // Username to authenticate the current connection when Redis ACLs are used.
+    // See: https://redis.io/commands/auth.
+    Username string
+
+    // Password to authenticate the current connection.
+    // See: https://redis.io/commands/auth.
     Password string
 
     // Redis DB to select after connecting to a server.
     // See: https://redis.io/commands/select.
     DB int
 
+    // Dial timeout for establishing new connections.
+    // Default is 5 seconds.
+    DialTimeout time.Duration
+
+    // Timeout for socket reads.
+    // If timeout is reached, read commands will fail with a timeout error
+    // instead of blocking.
+    //
+    // Use value -1 for no timeout and 0 for default.
+    // Default is 3 seconds.
+    ReadTimeout time.Duration
+
+    // Timeout for socket writes.
+    // If timeout is reached, write commands will fail with a timeout error
+    // instead of blocking.
+    //
+    // Use value -1 for no timeout and 0 for default.
+    // Default is ReadTimeout.
+    WriteTimeout time.Duration
+
     // Maximum number of socket connections.
     // Default is 10 connections per every CPU as reported by runtime.NumCPU.
     PoolSize int
@@ -66,6 +219,21 @@ type RedisClientOpt struct {
|
|||||||
TLSConfig *tls.Config
|
TLSConfig *tls.Config
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (opt RedisClientOpt) MakeRedisClient() interface{} {
|
||||||
|
return redis.NewClient(&redis.Options{
|
||||||
|
Network: opt.Network,
|
||||||
|
Addr: opt.Addr,
|
||||||
|
Username: opt.Username,
|
||||||
|
Password: opt.Password,
|
||||||
|
DB: opt.DB,
|
||||||
|
DialTimeout: opt.DialTimeout,
|
||||||
|
ReadTimeout: opt.ReadTimeout,
|
||||||
|
WriteTimeout: opt.WriteTimeout,
|
||||||
|
PoolSize: opt.PoolSize,
|
||||||
|
TLSConfig: opt.TLSConfig,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
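For illustration only (not part of the diff): a minimal sketch of how a RedisClientOpt might be wired into the library, using the asynq.NewClient and asynq.NewServer constructors shown elsewhere in this compare; all addresses and credentials below are placeholders.

	opt := asynq.RedisClientOpt{
		Addr:     "localhost:6379",
		Password: "secret",
		DB:       1,
	}
	client := asynq.NewClient(opt)                              // enqueues tasks
	srv := asynq.NewServer(opt, asynq.Config{Concurrency: 10})  // processes tasks
	_ = client
	_ = srv
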
// RedisFailoverClientOpt is used to create a redis client that talks
// to redis sentinels for service discovery and has an automatic failover
// capability.
@@ -81,13 +249,38 @@ type RedisFailoverClientOpt struct {
	// Redis sentinel password.
	SentinelPassword string

-	// Redis server password.
	// Username to authenticate the current connection when Redis ACLs are used.
	// See: https://redis.io/commands/auth.
	Username string

	// Password to authenticate the current connection.
	// See: https://redis.io/commands/auth.
	Password string

	// Redis DB to select after connecting to a server.
	// See: https://redis.io/commands/select.
	DB int

	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout time.Duration

	// Timeout for socket reads.
	// If timeout is reached, read commands will fail with a timeout error
	// instead of blocking.
	//
	// Use value -1 for no timeout and 0 for default.
	// Default is 3 seconds.
	ReadTimeout time.Duration

	// Timeout for socket writes.
	// If timeout is reached, write commands will fail with a timeout error
	// instead of blocking.
	//
	// Use value -1 for no timeout and 0 for default.
	// Default is ReadTimeout.
	WriteTimeout time.Duration

	// Maximum number of socket connections.
	// Default is 10 connections per every CPU as reported by runtime.NumCPU.
	PoolSize int

@@ -97,6 +290,79 @@ type RedisFailoverClientOpt struct {
	TLSConfig *tls.Config
}

func (opt RedisFailoverClientOpt) MakeRedisClient() interface{} {
	return redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:       opt.MasterName,
		SentinelAddrs:    opt.SentinelAddrs,
		SentinelPassword: opt.SentinelPassword,
		Username:         opt.Username,
		Password:         opt.Password,
		DB:               opt.DB,
		DialTimeout:      opt.DialTimeout,
		ReadTimeout:      opt.ReadTimeout,
		WriteTimeout:     opt.WriteTimeout,
		PoolSize:         opt.PoolSize,
		TLSConfig:        opt.TLSConfig,
	})
}

// RedisClusterClientOpt is used to create a redis client that connects to
// redis cluster.
type RedisClusterClientOpt struct {
	// A seed list of host:port addresses of cluster nodes.
	Addrs []string

	// The maximum number of retries before giving up.
	// Command is retried on network errors and MOVED/ASK redirects.
	// Default is 8 retries.
	MaxRedirects int

	// Username to authenticate the current connection when Redis ACLs are used.
	// See: https://redis.io/commands/auth.
	Username string

	// Password to authenticate the current connection.
	// See: https://redis.io/commands/auth.
	Password string

	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout time.Duration

	// Timeout for socket reads.
	// If timeout is reached, read commands will fail with a timeout error
	// instead of blocking.
	//
	// Use value -1 for no timeout and 0 for default.
	// Default is 3 seconds.
	ReadTimeout time.Duration

	// Timeout for socket writes.
	// If timeout is reached, write commands will fail with a timeout error
	// instead of blocking.
	//
	// Use value -1 for no timeout and 0 for default.
	// Default is ReadTimeout.
	WriteTimeout time.Duration

	// TLS Config used to connect to a server.
	// TLS will be negotiated only if this field is set.
	TLSConfig *tls.Config
}

func (opt RedisClusterClientOpt) MakeRedisClient() interface{} {
	return redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:        opt.Addrs,
		MaxRedirects: opt.MaxRedirects,
		Username:     opt.Username,
		Password:     opt.Password,
		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,
		TLSConfig:    opt.TLSConfig,
	})
}

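For illustration only (not part of the diff): a minimal sketch of the sentinel and cluster variants of RedisConnOpt; the master name and all addresses are placeholders.

	// Redis Sentinel with automatic failover.
	failover := asynq.RedisFailoverClientOpt{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"localhost:5000", "localhost:5001", "localhost:5002"},
	}

	// Redis Cluster, addressed by a seed list of nodes.
	cluster := asynq.RedisClusterClientOpt{
		Addrs: []string{"localhost:7000", "localhost:7001", "localhost:7002"},
	}
	_ = failover
	_ = cluster
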
// ParseRedisURI parses redis uri string and returns RedisConnOpt if uri is valid.
// It returns a non-nil error if uri cannot be parsed.
//
@@ -169,51 +435,3 @@ func parseRedisSentinelURI(u *url.URL) (RedisConnOpt, error) {
	}
	return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, Password: password}, nil
}
-
-// createRedisClient returns a redis client given a redis connection configuration.
-//
-// Passing an unexpected type as a RedisConnOpt argument will cause panic.
-func createRedisClient(r RedisConnOpt) *redis.Client {
-	switch r := r.(type) {
-	case *RedisClientOpt:
-		return redis.NewClient(&redis.Options{
-			Network:   r.Network,
-			Addr:      r.Addr,
-			Password:  r.Password,
-			DB:        r.DB,
-			PoolSize:  r.PoolSize,
-			TLSConfig: r.TLSConfig,
-		})
-	case RedisClientOpt:
-		return redis.NewClient(&redis.Options{
-			Network:   r.Network,
-			Addr:      r.Addr,
-			Password:  r.Password,
-			DB:        r.DB,
-			PoolSize:  r.PoolSize,
-			TLSConfig: r.TLSConfig,
-		})
-	case *RedisFailoverClientOpt:
-		return redis.NewFailoverClient(&redis.FailoverOptions{
-			MasterName:       r.MasterName,
-			SentinelAddrs:    r.SentinelAddrs,
-			SentinelPassword: r.SentinelPassword,
-			Password:         r.Password,
-			DB:               r.DB,
-			PoolSize:         r.PoolSize,
-			TLSConfig:        r.TLSConfig,
-		})
-	case RedisFailoverClientOpt:
-		return redis.NewFailoverClient(&redis.FailoverOptions{
-			MasterName:       r.MasterName,
-			SentinelAddrs:    r.SentinelAddrs,
-			SentinelPassword: r.SentinelPassword,
-			Password:         r.Password,
-			DB:               r.DB,
-			PoolSize:         r.PoolSize,
-			TLSConfig:        r.TLSConfig,
-		})
-	default:
-		panic(fmt.Sprintf("asynq: unexpected type %T for RedisConnOpt", r))
-	}
-}

@@ -7,6 +7,7 @@ package asynq
import (
	"flag"
	"sort"
	"strings"
	"testing"

	"github.com/go-redis/redis/v7"
@@ -24,6 +25,9 @@ var (
	redisAddr string
	redisDB   int

	useRedisCluster   bool
	redisClusterAddrs string // comma-separated list of host:port

	testLogLevel = FatalLevel
)

@@ -32,27 +36,56 @@ var testLogger *log.Logger
func init() {
	flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing")
	flag.IntVar(&redisDB, "redis_db", 14, "redis db number to use in testing")
	flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing")
	flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses")
	flag.Var(&testLogLevel, "loglevel", "log level to use in testing")

	testLogger = log.NewLogger(nil)
	testLogger.SetLevel(toInternalLogLevel(testLogLevel))
}

-func setup(tb testing.TB) *redis.Client {
func setup(tb testing.TB) (r redis.UniversalClient) {
	tb.Helper()
-	r := redis.NewClient(&redis.Options{
	if useRedisCluster {
		addrs := strings.Split(redisClusterAddrs, ",")
		if len(addrs) == 0 {
			tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
		}
		r = redis.NewClusterClient(&redis.ClusterOptions{
			Addrs: addrs,
		})
	} else {
		r = redis.NewClient(&redis.Options{
			Addr: redisAddr,
			DB:   redisDB,
		})
	}
	// Start each test with a clean slate.
	h.FlushDB(tb, r)
	return r
}

func getRedisConnOpt(tb testing.TB) RedisConnOpt {
	tb.Helper()
	if useRedisCluster {
		addrs := strings.Split(redisClusterAddrs, ",")
		if len(addrs) == 0 {
			tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
		}
		return RedisClusterClientOpt{
			Addrs: addrs,
		}
	}
	return RedisClientOpt{
		Addr: redisAddr,
		DB:   redisDB,
	}
}

var sortTaskOpt = cmp.Transformer("SortMsg", func(in []*Task) []*Task {
	out := append([]*Task(nil), in...) // Copy input to avoid mutating it
	sort.Slice(out, func(i, j int) bool {
-		return out[i].Type < out[j].Type
		return out[i].Type() < out[j].Type()
	})
	return out
})

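For illustration only (not part of the diff): with the flags registered above, the package tests could be pointed at either broker roughly like this; the cluster addresses are placeholders and the commands are run from the package directory.

	// Single-node Redis (defaults):
	//   go test
	// Redis Cluster:
	//   go test -redis_cluster -redis_cluster_addrs=localhost:7000,localhost:7001,localhost:7002
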
@@ -6,37 +6,46 @@ package asynq

import (
	"context"
	"encoding/json"
	"fmt"
-	"math/rand"
	"sync"
	"testing"
	"time"

	h "github.com/hibiken/asynq/internal/asynqtest"
)

// Creates a new task of type "task<n>" with payload {"data": n}.
func makeTask(n int) *Task {
	b, err := json.Marshal(map[string]int{"data": n})
	if err != nil {
		panic(err)
	}
	return NewTask(fmt.Sprintf("task%d", n), b)
}

// Simple E2E Benchmark testing with no scheduled tasks and retries.
func BenchmarkEndToEndSimple(b *testing.B) {
	const count = 100000
	for n := 0; n < b.N; n++ {
		b.StopTimer() // begin setup
		setup(b)
-		redis := &RedisClientOpt{
-			Addr: redisAddr,
-			DB:   redisDB,
-		}
		redis := getRedisConnOpt(b)
		client := NewClient(redis)
		srv := NewServer(redis, Config{
			Concurrency: 10,
			RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
				return time.Second
			},
			LogLevel: testLogLevel,
		})
		// Create a bunch of tasks
		for i := 0; i < count; i++ {
-			t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-			if err := client.Enqueue(t); err != nil {
			if _, err := client.Enqueue(makeTask(i)); err != nil {
				b.Fatalf("could not enqueue a task: %v", err)
			}
		}
		client.Close()

		var wg sync.WaitGroup
		wg.Add(count)
@@ -60,38 +69,47 @@ func BenchmarkEndToEnd(b *testing.B) {
	const count = 100000
	for n := 0; n < b.N; n++ {
		b.StopTimer() // begin setup
-		rand.Seed(time.Now().UnixNano())
		setup(b)
-		redis := &RedisClientOpt{
-			Addr: redisAddr,
-			DB:   redisDB,
-		}
		redis := getRedisConnOpt(b)
		client := NewClient(redis)
		srv := NewServer(redis, Config{
			Concurrency: 10,
			RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
				return time.Second
			},
			LogLevel: testLogLevel,
		})
		// Create a bunch of tasks
		for i := 0; i < count; i++ {
-			t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-			if err := client.Enqueue(t); err != nil {
			if _, err := client.Enqueue(makeTask(i)); err != nil {
				b.Fatalf("could not enqueue a task: %v", err)
			}
		}
		for i := 0; i < count; i++ {
-			t := NewTask(fmt.Sprintf("scheduled%d", i), map[string]interface{}{"data": i})
-			if err := client.EnqueueAt(time.Now().Add(time.Second), t); err != nil {
			if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil {
				b.Fatalf("could not enqueue a task: %v", err)
			}
		}
		client.Close()

		var wg sync.WaitGroup
		wg.Add(count * 2)
		handler := func(ctx context.Context, t *Task) error {
-			// randomly fail 1% of tasks
-			if rand.Intn(100) == 1 {
			var p map[string]int
			if err := json.Unmarshal(t.Payload(), &p); err != nil {
				b.Logf("internal error: %v", err)
			}
			n, ok := p["data"]
			if !ok {
				n = 1
				b.Logf("internal error: could not get data from payload")
			}
			retried, ok := GetRetryCount(ctx)
			if !ok {
				b.Logf("internal error: could not get retry count from context")
			}
			// Fail 1% of tasks for the first attempt.
			if retried == 0 && n%100 == 0 {
				return fmt.Errorf(":(")
			}
			wg.Done()
@@ -119,10 +137,7 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
	for n := 0; n < b.N; n++ {
		b.StopTimer() // begin setup
		setup(b)
-		redis := &RedisClientOpt{
-			Addr: redisAddr,
-			DB:   redisDB,
-		}
		redis := getRedisConnOpt(b)
		client := NewClient(redis)
		srv := NewServer(redis, Config{
			Concurrency: 10,
@@ -131,26 +146,25 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
				"default": 3,
				"low":     1,
			},
			LogLevel: testLogLevel,
		})
		// Create a bunch of tasks
		for i := 0; i < highCount; i++ {
-			t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-			if err := client.Enqueue(t, Queue("high")); err != nil {
			if _, err := client.Enqueue(makeTask(i), Queue("high")); err != nil {
				b.Fatalf("could not enqueue a task: %v", err)
			}
		}
		for i := 0; i < defaultCount; i++ {
-			t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-			if err := client.Enqueue(t); err != nil {
			if _, err := client.Enqueue(makeTask(i)); err != nil {
				b.Fatalf("could not enqueue a task: %v", err)
			}
		}
		for i := 0; i < lowCount; i++ {
-			t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-			if err := client.Enqueue(t, Queue("low")); err != nil {
			if _, err := client.Enqueue(makeTask(i), Queue("low")); err != nil {
				b.Fatalf("could not enqueue a task: %v", err)
			}
		}
		client.Close()

		var wg sync.WaitGroup
		wg.Add(highCount + defaultCount + lowCount)
@@ -168,3 +182,58 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
		b.StartTimer() // end teardown
	}
}

// E2E benchmark to check client enqueue operation performs correctly,
// while server is busy processing tasks.
func BenchmarkClientWhileServerRunning(b *testing.B) {
	const count = 10000
	for n := 0; n < b.N; n++ {
		b.StopTimer() // begin setup
		setup(b)
		redis := getRedisConnOpt(b)
		client := NewClient(redis)
		srv := NewServer(redis, Config{
			Concurrency: 10,
			RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
				return time.Second
			},
			LogLevel: testLogLevel,
		})
		// Enqueue 10,000 tasks.
		for i := 0; i < count; i++ {
			if _, err := client.Enqueue(makeTask(i)); err != nil {
				b.Fatalf("could not enqueue a task: %v", err)
			}
		}
		// Schedule 10,000 tasks.
		for i := 0; i < count; i++ {
			if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil {
				b.Fatalf("could not enqueue a task: %v", err)
			}
		}

		handler := func(ctx context.Context, t *Task) error {
			return nil
		}
		srv.Start(HandlerFunc(handler))

		b.StartTimer() // end setup

		b.Log("Starting enqueueing")
		enqueued := 0
		for enqueued < 100000 {
			t := NewTask(fmt.Sprintf("enqueued%d", enqueued), h.JSON(map[string]interface{}{"data": enqueued}))
			if _, err := client.Enqueue(t); err != nil {
				b.Logf("could not enqueue task %d: %v", enqueued, err)
				continue
			}
			enqueued++
		}
		b.Logf("Finished enqueueing %d tasks", enqueued)

		b.StopTimer() // begin teardown
		srv.Stop()
		client.Close()
		b.StartTimer() // end teardown
	}
}

client.go (271)
@@ -5,16 +5,16 @@
package asynq

import (
-	"errors"
	"fmt"
-	"sort"
	"strings"
	"sync"
	"time"

	"github.com/go-redis/redis/v7"
	"github.com/google/uuid"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/errors"
	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/rs/xid"
)

// A Client is responsible for scheduling tasks.
@@ -29,17 +29,42 @@ type Client struct {
	rdb *rdb.RDB
}

-// NewClient and returns a new Client given a redis connection option.
// NewClient returns a new Client instance given a redis connection option.
func NewClient(r RedisConnOpt) *Client {
-	rdb := rdb.NewRDB(createRedisClient(r))
	c, ok := r.MakeRedisClient().(redis.UniversalClient)
	if !ok {
		panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
	}
	rdb := rdb.NewRDB(c)
	return &Client{
		opts: make(map[string][]Option),
		rdb:  rdb,
	}
}

type OptionType int

const (
	MaxRetryOpt OptionType = iota
	QueueOpt
	TimeoutOpt
	DeadlineOpt
	UniqueOpt
	ProcessAtOpt
	ProcessInOpt
)

// Option specifies the task processing behavior.
-type Option interface{}
type Option interface {
	// String returns a string representation of the option.
	String() string

	// Type describes the type of the option.
	Type() OptionType

	// Value returns a value used to create this option.
	Value() interface{}
}

// Internal option representations.
type (
@@ -48,6 +73,8 @@ type (
	timeoutOption   time.Duration
	deadlineOption  time.Time
	uniqueOption    time.Duration
	processAtOption time.Time
	processInOption time.Duration
)

// MaxRetry returns an option to specify the max number of times
@@ -61,25 +88,51 @@ func MaxRetry(n int) Option {
	return retryOption(n)
}

func (n retryOption) String() string     { return fmt.Sprintf("MaxRetry(%d)", int(n)) }
func (n retryOption) Type() OptionType   { return MaxRetryOpt }
func (n retryOption) Value() interface{} { return int(n) }

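For illustration only (not part of the diff): a minimal sketch of passing options to Client.Enqueue and inspecting them through the new Option interface; the client and task values are assumed to come from the package documentation shown later in this compare.

	opts := []asynq.Option{
		asynq.MaxRetry(10),
		asynq.Queue("critical"),
		asynq.Timeout(2 * time.Minute),
	}
	// Each option reports a readable name, its kind, and its underlying value.
	for _, o := range opts {
		fmt.Println(o.String(), o.Type(), o.Value())
	}
	info, err := client.Enqueue(task, opts...)
	_ = info
	_ = err
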
// Queue returns an option to specify the queue to enqueue the task into.
-//
-// Queue name is case-insensitive and the lowercased version is used.
-func Queue(name string) Option {
-	return queueOption(strings.ToLower(name))
func Queue(qname string) Option {
	return queueOption(qname)
}

func (qname queueOption) String() string     { return fmt.Sprintf("Queue(%q)", string(qname)) }
func (qname queueOption) Type() OptionType   { return QueueOpt }
func (qname queueOption) Value() interface{} { return string(qname) }

// Timeout returns an option to specify how long a task may run.
// If the timeout elapses before the Handler returns, then the task
// will be retried.
//
// Zero duration means no limit.
//
// If there's a conflicting Deadline option, whichever comes earliest
// will be used.
func Timeout(d time.Duration) Option {
	return timeoutOption(d)
}

func (d timeoutOption) String() string     { return fmt.Sprintf("Timeout(%v)", time.Duration(d)) }
func (d timeoutOption) Type() OptionType   { return TimeoutOpt }
func (d timeoutOption) Value() interface{} { return time.Duration(d) }

// Deadline returns an option to specify the deadline for the given task.
// If it reaches the deadline before the Handler returns, then the task
// will be retried.
//
// If there's a conflicting Timeout option, whichever comes earliest
// will be used.
func Deadline(t time.Time) Option {
	return deadlineOption(t)
}

func (t deadlineOption) String() string {
	return fmt.Sprintf("Deadline(%v)", time.Time(t).Format(time.UnixDate))
}
func (t deadlineOption) Type() OptionType   { return DeadlineOpt }
func (t deadlineOption) Value() interface{} { return time.Time(t) }

// Unique returns an option to enqueue a task only if the given task is unique.
// Task enqueued with this option is guaranteed to be unique within the given ttl.
// Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued.
@@ -93,6 +146,34 @@ func Unique(ttl time.Duration) Option {
	return uniqueOption(ttl)
}

func (ttl uniqueOption) String() string     { return fmt.Sprintf("Unique(%v)", time.Duration(ttl)) }
func (ttl uniqueOption) Type() OptionType   { return UniqueOpt }
func (ttl uniqueOption) Value() interface{} { return time.Duration(ttl) }

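For illustration only (not part of the diff): a minimal sketch of enqueueing with Unique and handling the duplicate case; errors here is the standard library package, and the client and task values are assumed from the package documentation.

	// Only one task with the same uniqueness may be enqueued per 24h window.
	_, err := client.Enqueue(task, asynq.Unique(24*time.Hour))
	if errors.Is(err, asynq.ErrDuplicateTask) {
		// A matching task is already queued; skip or log as appropriate.
	}
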
// ProcessAt returns an option to specify when to process the given task.
//
// If there's a conflicting ProcessIn option, the last option passed to Enqueue overrides the others.
func ProcessAt(t time.Time) Option {
	return processAtOption(t)
}

func (t processAtOption) String() string {
	return fmt.Sprintf("ProcessAt(%v)", time.Time(t).Format(time.UnixDate))
}
func (t processAtOption) Type() OptionType   { return ProcessAtOpt }
func (t processAtOption) Value() interface{} { return time.Time(t) }

// ProcessIn returns an option to specify when to process the given task relative to the current time.
//
// If there's a conflicting ProcessAt option, the last option passed to Enqueue overrides the others.
func ProcessIn(d time.Duration) Option {
	return processInOption(d)
}

func (d processInOption) String() string     { return fmt.Sprintf("ProcessIn(%v)", time.Duration(d)) }
func (d processInOption) Type() OptionType   { return ProcessInOpt }
func (d processInOption) Value() interface{} { return time.Duration(d) }

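For illustration only (not part of the diff): a minimal sketch of the two scheduling options, which replace the EnqueueAt/EnqueueIn methods removed later in this file; client and task are assumed from the package documentation.

	// Process roughly 30 minutes from now.
	_, _ = client.Enqueue(task, asynq.ProcessIn(30*time.Minute))

	// Process at a specific wall-clock time; if both options are given,
	// the last one passed to Enqueue wins.
	_, _ = client.Enqueue(task, asynq.ProcessAt(time.Date(2021, time.January, 1, 9, 0, 0, 0, time.UTC)))
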
// ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
//
// ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
@@ -104,69 +185,61 @@ type option struct {
	timeout   time.Duration
	deadline  time.Time
	uniqueTTL time.Duration
	processAt time.Time
}

-func composeOptions(opts ...Option) option {
// composeOptions merges user provided options into the default options
// and returns the composed option.
// It also validates the user provided options and returns an error if any of
// the user provided options fail the validations.
func composeOptions(opts ...Option) (option, error) {
	res := option{
		retry:     defaultMaxRetry,
		queue:     base.DefaultQueueName,
		timeout:   0, // do not set to defaultTimeout here
		deadline:  time.Time{},
		processAt: time.Now(),
	}
	for _, opt := range opts {
		switch opt := opt.(type) {
		case retryOption:
			res.retry = int(opt)
		case queueOption:
-			res.queue = string(opt)
			qname := string(opt)
			if err := base.ValidateQueueName(qname); err != nil {
				return option{}, err
			}
			res.queue = qname
		case timeoutOption:
			res.timeout = time.Duration(opt)
		case deadlineOption:
			res.deadline = time.Time(opt)
		case uniqueOption:
			res.uniqueTTL = time.Duration(opt)
		case processAtOption:
			res.processAt = time.Time(opt)
		case processInOption:
			res.processAt = time.Now().Add(time.Duration(opt))
		default:
			// ignore unexpected option
		}
	}
-	return res
	return res, nil
-}
-
-// uniqueKey computes the redis key used for the given task.
-// It returns an empty string if ttl is zero.
-func uniqueKey(t *Task, ttl time.Duration, qname string) string {
-	if ttl == 0 {
-		return ""
-	}
-	return fmt.Sprintf("%s:%s:%s", t.Type, serializePayload(t.Payload.data), qname)
-}
-
-func serializePayload(payload map[string]interface{}) string {
-	if payload == nil {
-		return "nil"
-	}
-	type entry struct {
-		k string
-		v interface{}
-	}
-	var es []entry
-	for k, v := range payload {
-		es = append(es, entry{k, v})
-	}
-	// sort entries by key
-	sort.Slice(es, func(i, j int) bool { return es[i].k < es[j].k })
-	var b strings.Builder
-	for _, e := range es {
-		if b.Len() > 0 {
-			b.WriteString(",")
-		}
-		b.WriteString(fmt.Sprintf("%s=%v", e.k, e.v))
-	}
-	return b.String()
}

const (
	// Default max retry count used if nothing is specified.
-const defaultMaxRetry = 25
	defaultMaxRetry = 25

	// Default timeout used if both timeout and deadline are not specified.
	defaultTimeout = 30 * time.Minute
)

// Value zero indicates no timeout and no deadline.
var (
	noTimeout  time.Duration = 0
	noDeadline time.Time     = time.Unix(0, 0)
)

// SetDefaultOptions sets options to be used for a given task type.
// The argument opts specifies the behavior of task processing.
@@ -179,68 +252,76 @@ func (c *Client) SetDefaultOptions(taskType string, opts ...Option) {
	c.opts[taskType] = opts
}

-// EnqueueAt schedules task to be enqueued at the specified time.
-//
-// EnqueueAt returns nil if the task is scheduled successfully, otherwise returns a non-nil error.
-//
-// The argument opts specifies the behavior of task processing.
-// If there are conflicting Option values the last one overrides others.
-func (c *Client) EnqueueAt(t time.Time, task *Task, opts ...Option) error {
-	return c.enqueueAt(t, task, opts...)
-}
-
-// Enqueue enqueues task to be processed immediately.
-//
-// Enqueue returns nil if the task is enqueued successfully, otherwise returns a non-nil error.
-//
-// The argument opts specifies the behavior of task processing.
-// If there are conflicting Option values the last one overrides others.
-func (c *Client) Enqueue(task *Task, opts ...Option) error {
-	return c.enqueueAt(time.Now(), task, opts...)
-}
-
-// EnqueueIn schedules task to be enqueued after the specified delay.
-//
-// EnqueueIn returns nil if the task is scheduled successfully, otherwise returns a non-nil error.
-//
-// The argument opts specifies the behavior of task processing.
-// If there are conflicting Option values the last one overrides others.
-func (c *Client) EnqueueIn(d time.Duration, task *Task, opts ...Option) error {
-	return c.enqueueAt(time.Now().Add(d), task, opts...)
-}
-
-// Close closes the connection with redis server.
// Close closes the connection with redis.
func (c *Client) Close() error {
	return c.rdb.Close()
}

-func (c *Client) enqueueAt(t time.Time, task *Task, opts ...Option) error {
// Enqueue enqueues the given task to be processed asynchronously.
//
// Enqueue returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
// By default, max retry is set to 25 and timeout is set to 30 minutes.
//
// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
	if strings.TrimSpace(task.Type()) == "" {
		return nil, fmt.Errorf("task typename cannot be empty")
	}
	c.mu.Lock()
-	defer c.mu.Unlock()
-	if defaults, ok := c.opts[task.Type]; ok {
	if defaults, ok := c.opts[task.Type()]; ok {
		opts = append(defaults, opts...)
	}
-	opt := composeOptions(opts...)
	c.mu.Unlock()
	opt, err := composeOptions(opts...)
	if err != nil {
		return nil, err
	}
	deadline := noDeadline
	if !opt.deadline.IsZero() {
		deadline = opt.deadline
	}
	timeout := noTimeout
	if opt.timeout != 0 {
		timeout = opt.timeout
	}
	if deadline.Equal(noDeadline) && timeout == noTimeout {
		// If neither deadline nor timeout are set, use default timeout.
		timeout = defaultTimeout
	}
	var uniqueKey string
	if opt.uniqueTTL > 0 {
		uniqueKey = base.UniqueKey(opt.queue, task.Type(), task.Payload())
	}
	msg := &base.TaskMessage{
-		ID:        xid.New(),
		ID:        uuid.New(),
-		Type:      task.Type,
		Type:      task.Type(),
-		Payload:   task.Payload.data,
		Payload:   task.Payload(),
		Queue:     opt.queue,
		Retry:     opt.retry,
-		Timeout:   opt.timeout.String(),
		Deadline:  deadline.Unix(),
-		Deadline:  opt.deadline.Format(time.RFC3339),
		Timeout:   int64(timeout.Seconds()),
-		UniqueKey: uniqueKey(task, opt.uniqueTTL, opt.queue),
		UniqueKey: uniqueKey,
	}
-	var err error
	now := time.Now()
-	if time.Now().After(t) {
	var state base.TaskState
	if opt.processAt.Before(now) || opt.processAt.Equal(now) {
		opt.processAt = now
		err = c.enqueue(msg, opt.uniqueTTL)
		state = base.TaskStatePending
	} else {
-		err = c.schedule(msg, t, opt.uniqueTTL)
		err = c.schedule(msg, opt.processAt, opt.uniqueTTL)
		state = base.TaskStateScheduled
	}
-	if err == rdb.ErrDuplicateTask {
-		return fmt.Errorf("%w", ErrDuplicateTask)
	switch {
	case errors.Is(err, errors.ErrDuplicateTask):
		return nil, fmt.Errorf("%w", ErrDuplicateTask)
	case err != nil:
		return nil, err
	}
-	return err
	return newTaskInfo(msg, state, opt.processAt), nil
}

func (c *Client) enqueue(msg *base.TaskMessage, uniqueTTL time.Duration) error {

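For illustration only (not part of the diff): a minimal sketch of consuming the new Enqueue return value; the client and task values are assumed from the package documentation, and the TaskInfo field names used below (ID, Queue, State) are assumptions for illustration.

	info, err := client.Enqueue(task, asynq.ProcessIn(5*time.Minute), asynq.MaxRetry(3))
	if err != nil {
		log.Fatal(err)
	}
	// TaskInfo reports the generated ID, the target queue, and whether the
	// task is pending or scheduled, which is handy for logging and tests.
	log.Printf("enqueued task id=%v queue=%s state=%s", info.ID, info.Queue, info.State)
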
client_test.go (681): file diff suppressed because it is too large.

context.go (30)
@@ -16,6 +16,7 @@ type taskMetadata struct {
	id         string
	maxRetry   int
	retryCount int
	qname      string
}

// ctxKey type is unexported to prevent collisions with context keys defined in
@@ -27,25 +28,15 @@ type ctxKey int
const metadataCtxKey ctxKey = 0

// createContext returns a context and cancel function for a given task message.
-func createContext(msg *base.TaskMessage) (ctx context.Context, cancel context.CancelFunc) {
func createContext(msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
	metadata := taskMetadata{
		id:         msg.ID.String(),
		maxRetry:   msg.Retry,
		retryCount: msg.Retried,
		qname:      msg.Queue,
	}
-	ctx = context.WithValue(context.Background(), metadataCtxKey, metadata)
	ctx := context.WithValue(context.Background(), metadataCtxKey, metadata)
-	timeout, err := time.ParseDuration(msg.Timeout)
	return context.WithDeadline(ctx, deadline)
-	if err == nil && timeout != 0 {
-		ctx, cancel = context.WithTimeout(ctx, timeout)
-	}
-	deadline, err := time.Parse(time.RFC3339, msg.Deadline)
-	if err == nil && !deadline.IsZero() {
-		ctx, cancel = context.WithDeadline(ctx, deadline)
-	}
-	if cancel == nil {
-		ctx, cancel = context.WithCancel(ctx)
-	}
-	return ctx, cancel
}

// GetTaskID extracts a task ID from a context, if any.
@@ -83,3 +74,14 @@ func GetMaxRetry(ctx context.Context) (n int, ok bool) {
	}
	return metadata.maxRetry, true
}

// GetQueueName extracts queue name from a context, if any.
//
// Return value qname indicates which queue the task was pulled from.
func GetQueueName(ctx context.Context) (qname string, ok bool) {
	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
	if !ok {
		return "", false
	}
	return metadata.qname, true
}

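For illustration only (not part of the diff): a minimal sketch of reading this task metadata inside a handler; the log line is a placeholder and imports are omitted.

	func handle(ctx context.Context, t *asynq.Task) error {
		qname, _ := asynq.GetQueueName(ctx)
		retried, _ := asynq.GetRetryCount(ctx)
		log.Printf("processing %s from queue %q (retry #%d)", t.Type(), qname, retried)
		return nil
	}
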
@@ -10,51 +10,38 @@ import (
	"time"

	"github.com/google/go-cmp/cmp"
-	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/google/uuid"
	"github.com/hibiken/asynq/internal/base"
-	"github.com/rs/xid"
-)
-
-func TestCreateContextWithTimeRestrictions(t *testing.T) {
-	var (
-		noTimeout  = time.Duration(0)
-		noDeadline = time.Time{}
)

func TestCreateContextWithFutureDeadline(t *testing.T) {
	tests := []struct {
-		desc         string
-		timeout      time.Duration
		deadline time.Time
-		wantDeadline time.Time
	}{
-		{"only with timeout", 10 * time.Second, noDeadline, time.Now().Add(10 * time.Second)},
		{time.Now().Add(time.Hour)},
-		{"only with deadline", noTimeout, time.Now().Add(time.Hour), time.Now().Add(time.Hour)},
-		{"with timeout and deadline (timeout < deadline)", 10 * time.Second, time.Now().Add(time.Hour), time.Now().Add(10 * time.Second)},
-		{"with timeout and deadline (timeout > deadline)", 10 * time.Minute, time.Now().Add(30 * time.Second), time.Now().Add(30 * time.Second)},
	}

	for _, tc := range tests {
		msg := &base.TaskMessage{
			Type:    "something",
-			ID:      xid.New(),
			ID:      uuid.New(),
-			Timeout: tc.timeout.String(),
			Payload: nil,
-			Deadline: tc.deadline.Format(time.RFC3339),
		}

-		ctx, cancel := createContext(msg)
		ctx, cancel := createContext(msg, tc.deadline)

		select {
		case x := <-ctx.Done():
-			t.Errorf("%s: <-ctx.Done() == %v, want nothing (it should block)", tc.desc, x)
			t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
		default:
		}

		got, ok := ctx.Deadline()
		if !ok {
-			t.Errorf("%s: ctx.Deadline() returned false, want deadline to be set", tc.desc)
			t.Errorf("ctx.Deadline() returned false, want deadline to be set")
		}
-		if !cmp.Equal(tc.wantDeadline, got, cmpopts.EquateApproxTime(time.Second)) {
		if !cmp.Equal(tc.deadline, got) {
-			t.Errorf("%s: ctx.Deadline() returned %v, want %v", tc.desc, got, tc.wantDeadline)
			t.Errorf("ctx.Deadline() returned %v, want %v", got, tc.deadline)
		}

		cancel()
@@ -67,33 +54,36 @@ func TestCreateContextWithTimeRestrictions(t *testing.T) {
	}
}

-func TestCreateContextWithoutTimeRestrictions(t *testing.T) {
func TestCreateContextWithPastDeadline(t *testing.T) {
	tests := []struct {
		deadline time.Time
	}{
		{time.Now().Add(-2 * time.Hour)},
	}

	for _, tc := range tests {
		msg := &base.TaskMessage{
			Type:    "something",
-			ID:      xid.New(),
			ID:      uuid.New(),
-			Timeout: time.Duration(0).String(), // zero value to indicate no timeout
			Payload: nil,
-			Deadline: time.Time{}.Format(time.RFC3339), // zero value to indicate no deadline
		}

-		ctx, cancel := createContext(msg)
		ctx, cancel := createContext(msg, tc.deadline)
		defer cancel()
-		select {
-		case x := <-ctx.Done():
-			t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
-		default:
-		}
-
-		_, ok := ctx.Deadline()
-		if ok {
-			t.Error("ctx.Deadline() returned true, want deadline to not be set")
-		}
-
-		cancel()
-
		select {
		case <-ctx.Done():
		default:
-			t.Error("ctx.Done() blocked, want it to be non-blocking")
			t.Errorf("ctx.Done() blocked, want it to be non-blocking")
		}

		got, ok := ctx.Deadline()
		if !ok {
			t.Errorf("ctx.Deadline() returned false, want deadline to be set")
		}
		if !cmp.Equal(tc.deadline, got) {
			t.Errorf("ctx.Deadline() returned %v, want %v", got, tc.deadline)
		}
	}
}

@@ -102,12 +92,14 @@ func TestGetTaskMetadataFromContext(t *testing.T) {
		desc string
		msg  *base.TaskMessage
	}{
-		{"with zero retried message", &base.TaskMessage{Type: "something", ID: xid.New(), Retry: 25, Retried: 0}},
		{"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}},
-		{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: xid.New(), Retry: 10, Retried: 5}},
		{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}},
		{"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}},
	}

	for _, tc := range tests {
-		ctx, _ := createContext(tc.msg)
		ctx, cancel := createContext(tc.msg, time.Now().Add(30*time.Minute))
		defer cancel()

		id, ok := GetTaskID(ctx)
		if !ok {
@@ -132,6 +124,14 @@ func TestGetTaskMetadataFromContext(t *testing.T) {
		if ok && maxRetry != tc.msg.Retry {
			t.Errorf("%s: GetMaxRetry(ctx) returned n == %d want %d", tc.desc, maxRetry, tc.msg.Retry)
		}

		qname, ok := GetQueueName(ctx)
		if !ok {
			t.Errorf("%s: GetQueueName(ctx) returned ok == false", tc.desc)
		}
		if ok && qname != tc.msg.Queue {
			t.Errorf("%s: GetQueueName(ctx) returned qname == %q, want %q", tc.desc, qname, tc.msg.Queue)
		}
	}
}

@@ -153,5 +153,8 @@ func TestGetTaskMetadataFromContextError(t *testing.T) {
		if _, ok := GetMaxRetry(tc.ctx); ok {
			t.Errorf("%s: GetMaxRetry(ctx) returned ok == true", tc.desc)
		}
		if _, ok := GetQueueName(tc.ctx); ok {
			t.Errorf("%s: GetQueueName(ctx) returned ok == true", tc.desc)
		}
	}
}

doc.go (49)
@@ -3,40 +3,46 @@
// that can be found in the LICENSE file.

/*
-Package asynq provides a framework for asynchronous task processing.
Package asynq provides a framework for Redis based distributed task queue.

-Asynq uses Redis as a message broker. To connect to redis server,
Asynq uses Redis as a message broker. To connect to redis,
-specify the options using one of RedisConnOpt types.
specify the connection using one of RedisConnOpt types.

-	redis = &asynq.RedisClientOpt{
	redisConnOpt = asynq.RedisClientOpt{
		Addr:     "127.0.0.1:6379",
		Password: "xxxxx",
-		DB:       3,
		DB:       2,
	}

-The Client is used to enqueue a task to be processed at the specified time.
The Client is used to enqueue a task.

-Task is created with two parameters: its type and payload.

-	client := asynq.NewClient(redis)
	client := asynq.NewClient(redisConnOpt)

-	t := asynq.NewTask(
-		"send_email",
-		map[string]interface{}{"user_id": 42})
	// Task is created with two parameters: its type and payload.
	// Payload data is simply an array of bytes. It can be encoded in JSON, Protocol Buffer, Gob, etc.
	b, err := json.Marshal(ExamplePayload{UserID: 42})
	if err != nil {
		log.Fatal(err)
	}

	task := asynq.NewTask("example", b)

	// Enqueue the task to be processed immediately.
-	err := client.Enqueue(t)
	info, err := client.Enqueue(task)

	// Schedule the task to be processed after one minute.
-	err = client.EnqueueIn(time.Minute, t)
	info, err = client.Enqueue(t, asynq.ProcessIn(1*time.Minute))

-The Server is used to run the background task processing with a given
The Server is used to run the task processing workers with a given
handler.

-	srv := asynq.NewServer(redis, asynq.Config{
	srv := asynq.NewServer(redisConnOpt, asynq.Config{
		Concurrency: 10,
	})

-	srv.Run(handler)
	if err := srv.Run(handler); err != nil {
		log.Fatal(err)
	}

Handler is an interface type with a method which
takes a task and returns an error. Handler should return nil if
@@ -50,10 +56,13 @@ Example of a type that implements the Handler interface.

	func (h *TaskHandler) ProcessTask(ctx context.Context, task *asynq.Task) error {
		switch task.Type {
-		case "send_email":
		case "example":
-			id, err := task.Payload.GetInt("user_id")
			var data ExamplePayload
-			// send email
			if err := json.Unmarshal(task.Payload(), &data); err != nil {
-			//...
				return err
			}
			// perform task with the data

		default:
			return fmt.Errorf("unexpected task type %q", task.Type)
		}

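For illustration only (not part of the diff): the ExamplePayload type referenced in the package documentation above is not defined in this compare; a plausible definition would be:

	type ExamplePayload struct {
		UserID int
	}
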
BIN docs/assets/asynqmon-queues-view.png (new file, 279 KiB; binary file not shown)
BIN docs/assets/asynqmon-task-view.png (new file, 347 KiB; binary file not shown)
BIN docs/assets/cluster.png (new file, 60 KiB; binary file not shown)
BIN (modified image, binary file not shown; before 983 KiB, after 329 KiB)
@@ -9,6 +9,7 @@ import (
	"log"
	"os"
	"os/signal"
	"time"

	"github.com/hibiken/asynq"
	"golang.org/x/sys/unix"
@@ -29,7 +30,7 @@ func ExampleServer_Run() {
	}
}

-func ExampleServer_Stop() {
func ExampleServer_Shutdown() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: ":6379"},
		asynq.Config{Concurrency: 20},
@@ -46,10 +47,10 @@ func ExampleServer_Stop() {
	signal.Notify(sigs, unix.SIGTERM, unix.SIGINT)
	<-sigs // wait for termination signal

-	srv.Stop()
	srv.Shutdown()
}

-func ExampleServer_Quiet() {
func ExampleServer_Stop() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: ":6379"},
		asynq.Config{Concurrency: 20},
@@ -69,13 +70,32 @@ func ExampleServer_Quiet() {
	for {
		s := <-sigs
		if s == unix.SIGTSTP {
-			srv.Quiet() // stop processing new tasks
			srv.Stop() // stop processing new tasks
			continue
		}
-		break
		break // received SIGTERM or SIGINT signal
	}

-	srv.Stop()
	srv.Shutdown()
}

func ExampleScheduler() {
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: ":6379"},
		&asynq.SchedulerOpts{Location: time.Local},
	)

	if _, err := scheduler.Register("* * * * *", asynq.NewTask("task1", nil)); err != nil {
		log.Fatal(err)
	}
	if _, err := scheduler.Register("@every 30s", asynq.NewTask("task2", nil)); err != nil {
		log.Fatal(err)
	}

	// Run blocks and waits for os signal to terminate the program.
	if err := scheduler.Run(); err != nil {
		log.Fatal(err)
	}
}

func ExampleParseRedisURI() {

75  forwarder.go  Normal file
@@ -0,0 +1,75 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package asynq
+
+import (
+	"sync"
+	"time"
+
+	"github.com/hibiken/asynq/internal/base"
+	"github.com/hibiken/asynq/internal/log"
+)
+
+// A forwarder is responsible for moving scheduled and retry tasks to pending state
+// so that the tasks get processed by the workers.
+type forwarder struct {
+	logger *log.Logger
+	broker base.Broker
+
+	// channel to communicate back to the long running "forwarder" goroutine.
+	done chan struct{}
+
+	// list of queue names to check and enqueue.
+	queues []string
+
+	// poll interval on average
+	avgInterval time.Duration
+}
+
+type forwarderParams struct {
+	logger   *log.Logger
+	broker   base.Broker
+	queues   []string
+	interval time.Duration
+}
+
+func newForwarder(params forwarderParams) *forwarder {
+	return &forwarder{
+		logger:      params.logger,
+		broker:      params.broker,
+		done:        make(chan struct{}),
+		queues:      params.queues,
+		avgInterval: params.interval,
+	}
+}
+
+func (f *forwarder) shutdown() {
+	f.logger.Debug("Forwarder shutting down...")
+	// Signal the forwarder goroutine to stop polling.
+	f.done <- struct{}{}
+}
+
+// start starts the "forwarder" goroutine.
+func (f *forwarder) start(wg *sync.WaitGroup) {
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-f.done:
+				f.logger.Debug("Forwarder done")
+				return
+			case <-time.After(f.avgInterval):
+				f.exec()
+			}
+		}
+	}()
+}
+
+func (f *forwarder) exec() {
+	if err := f.broker.ForwardIfReady(f.queues...); err != nil {
+		f.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
+	}
+}
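For orientation, a minimal sketch of how this component is driven; logger and broker are placeholders (an internal/log.Logger and a base.Broker such as one returned by rdb.NewRDB), and the interval is arbitrary. forwarder_test.go below shows the real wiring.

// Sketch only: logger and broker are placeholders, not definitions from this change.
var wg sync.WaitGroup
f := newForwarder(forwarderParams{
	logger:   logger,
	broker:   broker,
	queues:   []string{"default", "critical"},
	interval: 5 * time.Second,
})
f.start(&wg) // spawns the polling goroutine
// ... server is running ...
f.shutdown() // signals the goroutine to stop polling
wg.Wait()    // waits for it to exit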
137
forwarder_test.go
Normal file
137
forwarder_test.go
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package asynq
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
h "github.com/hibiken/asynq/internal/asynqtest"
|
||||||
|
"github.com/hibiken/asynq/internal/base"
|
||||||
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestForwarder(t *testing.T) {
|
||||||
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
|
rdbClient := rdb.NewRDB(r)
|
||||||
|
const pollInterval = time.Second
|
||||||
|
s := newForwarder(forwarderParams{
|
||||||
|
logger: testLogger,
|
||||||
|
broker: rdbClient,
|
||||||
|
queues: []string{"default", "critical"},
|
||||||
|
interval: pollInterval,
|
||||||
|
})
|
||||||
|
t1 := h.NewTaskMessageWithQueue("gen_thumbnail", nil, "default")
|
||||||
|
t2 := h.NewTaskMessageWithQueue("send_email", nil, "critical")
|
||||||
|
t3 := h.NewTaskMessageWithQueue("reindex", nil, "default")
|
||||||
|
t4 := h.NewTaskMessageWithQueue("sync", nil, "critical")
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
initScheduled map[string][]base.Z // scheduled queue initial state
|
||||||
|
initRetry map[string][]base.Z // retry queue initial state
|
||||||
|
initPending map[string][]*base.TaskMessage // default queue initial state
|
||||||
|
wait time.Duration // wait duration before checking for final state
|
||||||
|
wantScheduled map[string][]*base.TaskMessage // schedule queue final state
|
||||||
|
wantRetry map[string][]*base.TaskMessage // retry queue final state
|
||||||
|
wantPending map[string][]*base.TaskMessage // default queue final state
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
initScheduled: map[string][]base.Z{
|
||||||
|
"default": {{Message: t1, Score: now.Add(time.Hour).Unix()}},
|
||||||
|
"critical": {{Message: t2, Score: now.Add(-2 * time.Second).Unix()}},
|
||||||
|
},
|
||||||
|
initRetry: map[string][]base.Z{
|
||||||
|
"default": {{Message: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()}},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
initPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {t4},
|
||||||
|
},
|
||||||
|
wait: pollInterval * 2,
|
||||||
|
wantScheduled: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t1},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t3},
|
||||||
|
"critical": {t2, t4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
initScheduled: map[string][]base.Z{
|
||||||
|
"default": {
|
||||||
|
{Message: t1, Score: now.Unix()},
|
||||||
|
{Message: t3, Score: now.Add(-500 * time.Millisecond).Unix()},
|
||||||
|
},
|
||||||
|
"critical": {
|
||||||
|
{Message: t2, Score: now.Add(-2 * time.Second).Unix()},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
initRetry: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
initPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {t4},
|
||||||
|
},
|
||||||
|
wait: pollInterval * 2,
|
||||||
|
wantScheduled: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t1, t3},
|
||||||
|
"critical": {t2, t4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
h.FlushDB(t, r) // clean up db before each test case.
|
||||||
|
h.SeedAllScheduledQueues(t, r, tc.initScheduled) // initialize scheduled queue
|
||||||
|
h.SeedAllRetryQueues(t, r, tc.initRetry) // initialize retry queue
|
||||||
|
h.SeedAllPendingQueues(t, r, tc.initPending) // initialize default queue
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
s.start(&wg)
|
||||||
|
time.Sleep(tc.wait)
|
||||||
|
s.shutdown()
|
||||||
|
|
||||||
|
for qname, want := range tc.wantScheduled {
|
||||||
|
gotScheduled := h.GetScheduledMessages(t, r, qname)
|
||||||
|
if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
|
||||||
|
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.ScheduledKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for qname, want := range tc.wantRetry {
|
||||||
|
gotRetry := h.GetRetryMessages(t, r, qname)
|
||||||
|
if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" {
|
||||||
|
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.RetryKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for qname, want := range tc.wantPending {
|
||||||
|
gotPending := h.GetPendingMessages(t, r, qname)
|
||||||
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
|
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.PendingKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
9  go.mod
@@ -3,12 +3,15 @@ module github.com/hibiken/asynq
 go 1.13

 require (
-	github.com/go-redis/redis/v7 v7.2.0
-	github.com/google/go-cmp v0.4.0
-	github.com/rs/xid v1.2.1
+	github.com/go-redis/redis/v7 v7.4.0
+	github.com/golang/protobuf v1.4.1
+	github.com/google/go-cmp v0.5.0
+	github.com/google/uuid v1.2.0
+	github.com/robfig/cron/v3 v3.0.1
 	github.com/spf13/cast v1.3.1
 	go.uber.org/goleak v0.10.0
 	golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e
 	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
+	google.golang.org/protobuf v1.25.0
 	gopkg.in/yaml.v2 v2.2.7 // indirect
 )
84
go.sum
84
go.sum
@@ -1,17 +1,40 @@
|
|||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
github.com/go-redis/redis/v7 v7.0.0-beta.4 h1:p6z7Pde69EGRWvlC++y8aFcaWegyrKHzOBGo0zUACTQ=
|
|
||||||
github.com/go-redis/redis/v7 v7.0.0-beta.4/go.mod h1:xhhSbUMTsleRPur+Vgx9sUHtyN33bdjxY+9/0n9Ig8s=
|
|
||||||
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
|
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
|
||||||
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
||||||
|
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
|
||||||
|
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
|
||||||
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
|
||||||
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||||
|
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
||||||
|
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||||
@@ -20,16 +43,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
|
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
||||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
|
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
|
||||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||||
@@ -37,13 +59,23 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
|||||||
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
|
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
|
||||||
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
|
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
|
||||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@@ -57,11 +89,31 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
|
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||||
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
|
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||||
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
@@ -72,3 +124,5 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
|
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
|
||||||
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
|||||||
80  healthcheck.go  Normal file
@@ -0,0 +1,80 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package asynq
+
+import (
+	"sync"
+	"time"
+
+	"github.com/hibiken/asynq/internal/base"
+	"github.com/hibiken/asynq/internal/log"
+)
+
+// healthchecker is responsible for pinging broker periodically
+// and call user provided HeathCheckFunc with the ping result.
+type healthchecker struct {
+	logger *log.Logger
+	broker base.Broker
+
+	// channel to communicate back to the long running "healthchecker" goroutine.
+	done chan struct{}
+
+	// interval between healthchecks.
+	interval time.Duration
+
+	// function to call periodically.
+	healthcheckFunc func(error)
+}
+
+type healthcheckerParams struct {
+	logger          *log.Logger
+	broker          base.Broker
+	interval        time.Duration
+	healthcheckFunc func(error)
+}
+
+func newHealthChecker(params healthcheckerParams) *healthchecker {
+	return &healthchecker{
+		logger:          params.logger,
+		broker:          params.broker,
+		done:            make(chan struct{}),
+		interval:        params.interval,
+		healthcheckFunc: params.healthcheckFunc,
+	}
+}
+
+func (hc *healthchecker) shutdown() {
+	if hc.healthcheckFunc == nil {
+		return
+	}
+
+	hc.logger.Debug("Healthchecker shutting down...")
+	// Signal the healthchecker goroutine to stop.
+	hc.done <- struct{}{}
+}
+
+func (hc *healthchecker) start(wg *sync.WaitGroup) {
+	if hc.healthcheckFunc == nil {
+		return
+	}
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		timer := time.NewTimer(hc.interval)
+		for {
+			select {
+			case <-hc.done:
+				hc.logger.Debug("Healthchecker done")
+				timer.Stop()
+				return
+			case <-timer.C:
+				err := hc.broker.Ping()
+				hc.healthcheckFunc(err)
+				timer.Reset(hc.interval)
+			}
+		}
+	}()
+}
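The healthchecker only runs when a callback is provided. A hedged sketch of enabling it from application code, assuming the server surfaces these knobs as Config.HealthCheckFunc and Config.HealthCheckInterval:

// Assumes Config exposes HealthCheckFunc / HealthCheckInterval for this component.
srv := asynq.NewServer(
	asynq.RedisClientOpt{Addr: ":6379"},
	asynq.Config{
		Concurrency:         10,
		HealthCheckInterval: 15 * time.Second,
		HealthCheckFunc: func(err error) {
			if err != nil {
				log.Printf("asynq: healthcheck failed: %v", err)
			}
		},
	},
)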
103
healthcheck_test.go
Normal file
103
healthcheck_test.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package asynq
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
|
"github.com/hibiken/asynq/internal/testbroker"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHealthChecker(t *testing.T) {
|
||||||
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
|
rdbClient := rdb.NewRDB(r)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// mu guards called and e variables.
|
||||||
|
mu sync.Mutex
|
||||||
|
called int
|
||||||
|
e error
|
||||||
|
)
|
||||||
|
checkFn := func(err error) {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
called++
|
||||||
|
e = err
|
||||||
|
}
|
||||||
|
|
||||||
|
hc := newHealthChecker(healthcheckerParams{
|
||||||
|
logger: testLogger,
|
||||||
|
broker: rdbClient,
|
||||||
|
interval: 1 * time.Second,
|
||||||
|
healthcheckFunc: checkFn,
|
||||||
|
})
|
||||||
|
|
||||||
|
hc.start(&sync.WaitGroup{})
|
||||||
|
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
mu.Lock()
|
||||||
|
if called == 0 {
|
||||||
|
t.Errorf("Healthchecker did not call the provided HealthCheckFunc")
|
||||||
|
}
|
||||||
|
if e != nil {
|
||||||
|
t.Errorf("HealthCheckFunc was called with non-nil error: %v", e)
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
|
|
||||||
|
hc.shutdown()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHealthCheckerWhenRedisDown(t *testing.T) {
|
||||||
|
// Make sure that healthchecker goroutine doesn't panic
|
||||||
|
// if it cannot connect to redis.
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
t.Errorf("panic occurred: %v", r)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
r := rdb.NewRDB(setup(t))
|
||||||
|
defer r.Close()
|
||||||
|
testBroker := testbroker.NewTestBroker(r)
|
||||||
|
var (
|
||||||
|
// mu guards called and e variables.
|
||||||
|
mu sync.Mutex
|
||||||
|
called int
|
||||||
|
e error
|
||||||
|
)
|
||||||
|
checkFn := func(err error) {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
called++
|
||||||
|
e = err
|
||||||
|
}
|
||||||
|
|
||||||
|
hc := newHealthChecker(healthcheckerParams{
|
||||||
|
logger: testLogger,
|
||||||
|
broker: testBroker,
|
||||||
|
interval: 1 * time.Second,
|
||||||
|
healthcheckFunc: checkFn,
|
||||||
|
})
|
||||||
|
|
||||||
|
testBroker.Sleep()
|
||||||
|
hc.start(&sync.WaitGroup{})
|
||||||
|
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
mu.Lock()
|
||||||
|
if called == 0 {
|
||||||
|
t.Errorf("Healthchecker did not call the provided HealthCheckFunc")
|
||||||
|
}
|
||||||
|
if e == nil {
|
||||||
|
t.Errorf("HealthCheckFunc was called with nil; want non-nil error")
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
|
|
||||||
|
hc.shutdown()
|
||||||
|
}
|
||||||
114  heartbeat.go
@@ -5,9 +5,11 @@
 package asynq

 import (
+	"os"
 	"sync"
 	"time"

+	"github.com/google/uuid"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/log"
 )
@@ -18,63 +20,151 @@ type heartbeater struct {
 	logger *log.Logger
 	broker base.Broker

-	ss *base.ServerState
-
 	// channel to communicate back to the long running "heartbeater" goroutine.
 	done chan struct{}

 	// interval between heartbeats.
 	interval time.Duration
+
+	// following fields are initialized at construction time and are immutable.
+	host           string
+	pid            int
+	serverID       string
+	concurrency    int
+	queues         map[string]int
+	strictPriority bool
+
+	// following fields are mutable and should be accessed only by the
+	// heartbeater goroutine. In other words, confine these variables
+	// to this goroutine only.
+	started time.Time
+	workers map[string]*workerInfo
+
+	// state is shared with other goroutine but is concurrency safe.
+	state *base.ServerState
+
+	// channels to receive updates on active workers.
+	starting <-chan *workerInfo
+	finished <-chan *base.TaskMessage
 }

 type heartbeaterParams struct {
-	logger      *log.Logger
-	broker      base.Broker
-	serverState *base.ServerState
-	interval    time.Duration
+	logger         *log.Logger
+	broker         base.Broker
+	interval       time.Duration
+	concurrency    int
+	queues         map[string]int
+	strictPriority bool
+	state          *base.ServerState
+	starting       <-chan *workerInfo
+	finished       <-chan *base.TaskMessage
 }

 func newHeartbeater(params heartbeaterParams) *heartbeater {
+	host, err := os.Hostname()
+	if err != nil {
+		host = "unknown-host"
+	}
+
 	return &heartbeater{
 		logger:   params.logger,
 		broker:   params.broker,
-		ss:       params.serverState,
 		done:     make(chan struct{}),
 		interval: params.interval,
+
+		host:           host,
+		pid:            os.Getpid(),
+		serverID:       uuid.New().String(),
+		concurrency:    params.concurrency,
+		queues:         params.queues,
+		strictPriority: params.strictPriority,
+
+		state:    params.state,
+		workers:  make(map[string]*workerInfo),
+		starting: params.starting,
+		finished: params.finished,
 	}
 }

-func (h *heartbeater) terminate() {
+func (h *heartbeater) shutdown() {
 	h.logger.Debug("Heartbeater shutting down...")
 	// Signal the heartbeater goroutine to stop.
 	h.done <- struct{}{}
 }

+// A workerInfo holds an active worker information.
+type workerInfo struct {
+	// the task message the worker is processing.
+	msg *base.TaskMessage
+	// the time the worker has started processing the message.
+	started time.Time
+	// deadline the worker has to finish processing the task by.
+	deadline time.Time
+}
+
 func (h *heartbeater) start(wg *sync.WaitGroup) {
-	h.ss.SetStarted(time.Now())
-	h.ss.SetStatus(base.StatusRunning)
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
+
+		h.started = time.Now()
+
 		h.beat()
+
+		timer := time.NewTimer(h.interval)
 		for {
 			select {
 			case <-h.done:
-				h.broker.ClearServerState(h.ss)
+				h.broker.ClearServerState(h.host, h.pid, h.serverID)
 				h.logger.Debug("Heartbeater done")
+				timer.Stop()
 				return
-			case <-time.After(h.interval):
+
+			case <-timer.C:
 				h.beat()
+				timer.Reset(h.interval)
+
+			case w := <-h.starting:
+				h.workers[w.msg.ID.String()] = w
+
+			case msg := <-h.finished:
+				delete(h.workers, msg.ID.String())
 			}
 		}
 	}()
 }

 func (h *heartbeater) beat() {
+	info := base.ServerInfo{
+		Host:              h.host,
+		PID:               h.pid,
+		ServerID:          h.serverID,
+		Concurrency:       h.concurrency,
+		Queues:            h.queues,
+		StrictPriority:    h.strictPriority,
+		Status:            h.state.String(),
+		Started:           h.started,
+		ActiveWorkerCount: len(h.workers),
+	}
+
+	var ws []*base.WorkerInfo
+	for id, w := range h.workers {
+		ws = append(ws, &base.WorkerInfo{
+			Host:     h.host,
+			PID:      h.pid,
+			ServerID: h.serverID,
+			ID:       id,
+			Type:     w.msg.Type,
+			Queue:    w.msg.Queue,
+			Payload:  w.msg.Payload,
+			Started:  w.started,
+			Deadline: w.deadline,
+		})
+	}
+
 	// Note: Set TTL to be long enough so that it won't expire before we write again
 	// and short enough to expire quickly once the process is shut down or killed.
-	err := h.broker.WriteServerState(h.ss, h.interval*2)
-	if err != nil {
-		h.logger.Errorf("could not write heartbeat data: %v", err)
+	if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil {
+		h.logger.Errorf("could not write server state data: %v", err)
 	}
 }
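The heartbeater now tracks active workers through the starting and finished channels. Illustrative only; the sender-side channel plumbing is an assumption, and the real wiring lives in the processor:

// msg and deadline are assumed to come from the processor's bookkeeping.
starting <- &workerInfo{msg: msg, started: time.Now(), deadline: deadline}
// ... worker processes the task ...
finished <- msg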
@@ -19,6 +19,7 @@ import (
|
|||||||
|
|
||||||
func TestHeartbeater(t *testing.T) {
|
func TestHeartbeater(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
rdbClient := rdb.NewRDB(r)
|
rdbClient := rdb.NewRDB(r)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
@@ -28,7 +29,7 @@ func TestHeartbeater(t *testing.T) {
|
|||||||
queues map[string]int
|
queues map[string]int
|
||||||
concurrency int
|
concurrency int
|
||||||
}{
|
}{
|
||||||
{time.Second, "localhost", 45678, map[string]int{"default": 1}, 10},
|
{2 * time.Second, "localhost", 45678, map[string]int{"default": 1}, 10},
|
||||||
}
|
}
|
||||||
|
|
||||||
timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond)
|
timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond)
|
||||||
@@ -37,14 +38,24 @@ func TestHeartbeater(t *testing.T) {
|
|||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
h.FlushDB(t, r)
|
h.FlushDB(t, r)
|
||||||
|
|
||||||
state := base.NewServerState(tc.host, tc.pid, tc.concurrency, tc.queues, false)
|
state := base.NewServerState()
|
||||||
hb := newHeartbeater(heartbeaterParams{
|
hb := newHeartbeater(heartbeaterParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: rdbClient,
|
broker: rdbClient,
|
||||||
serverState: state,
|
|
||||||
interval: tc.interval,
|
interval: tc.interval,
|
||||||
|
concurrency: tc.concurrency,
|
||||||
|
queues: tc.queues,
|
||||||
|
strictPriority: false,
|
||||||
|
state: state,
|
||||||
|
starting: make(chan *workerInfo),
|
||||||
|
finished: make(chan *base.TaskMessage),
|
||||||
})
|
})
|
||||||
|
|
||||||
|
// Change host and pid fields for testing purpose.
|
||||||
|
hb.host = tc.host
|
||||||
|
hb.pid = tc.pid
|
||||||
|
|
||||||
|
state.Set(base.StateActive)
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
hb.start(&wg)
|
hb.start(&wg)
|
||||||
|
|
||||||
@@ -54,58 +65,58 @@ func TestHeartbeater(t *testing.T) {
|
|||||||
Queues: tc.queues,
|
Queues: tc.queues,
|
||||||
Concurrency: tc.concurrency,
|
Concurrency: tc.concurrency,
|
||||||
Started: time.Now(),
|
Started: time.Now(),
|
||||||
Status: "running",
|
Status: "active",
|
||||||
}
|
}
|
||||||
|
|
||||||
// allow for heartbeater to write to redis
|
// allow for heartbeater to write to redis
|
||||||
time.Sleep(tc.interval * 2)
|
time.Sleep(tc.interval)
|
||||||
|
|
||||||
ss, err := rdbClient.ListServers()
|
ss, err := rdbClient.ListServers()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("could not read server info from redis: %v", err)
|
t.Errorf("could not read server info from redis: %v", err)
|
||||||
hb.terminate()
|
hb.shutdown()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(ss) != 1 {
|
if len(ss) != 1 {
|
||||||
t.Errorf("(*RDB).ListServers returned %d process info, want 1", len(ss))
|
t.Errorf("(*RDB).ListServers returned %d process info, want 1", len(ss))
|
||||||
hb.terminate()
|
hb.shutdown()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
|
if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
|
||||||
t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
|
t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
|
||||||
hb.terminate()
|
hb.shutdown()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// status change
|
// status change
|
||||||
state.SetStatus(base.StatusStopped)
|
state.Set(base.StateClosed)
|
||||||
|
|
||||||
// allow for heartbeater to write to redis
|
// allow for heartbeater to write to redis
|
||||||
time.Sleep(tc.interval * 2)
|
time.Sleep(tc.interval * 2)
|
||||||
|
|
||||||
want.Status = "stopped"
|
want.Status = "closed"
|
||||||
ss, err = rdbClient.ListServers()
|
ss, err = rdbClient.ListServers()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("could not read process status from redis: %v", err)
|
t.Errorf("could not read process status from redis: %v", err)
|
||||||
hb.terminate()
|
hb.shutdown()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(ss) != 1 {
|
if len(ss) != 1 {
|
||||||
t.Errorf("(*RDB).ListProcesses returned %d process info, want 1", len(ss))
|
t.Errorf("(*RDB).ListProcesses returned %d process info, want 1", len(ss))
|
||||||
hb.terminate()
|
hb.shutdown()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
|
if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
|
||||||
t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
|
t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
|
||||||
hb.terminate()
|
hb.shutdown()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
hb.terminate()
|
hb.shutdown()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -118,13 +129,20 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
r := rdb.NewRDB(setup(t))
|
r := rdb.NewRDB(setup(t))
|
||||||
|
defer r.Close()
|
||||||
testBroker := testbroker.NewTestBroker(r)
|
testBroker := testbroker.NewTestBroker(r)
|
||||||
ss := base.NewServerState("localhost", 1234, 10, map[string]int{"default": 1}, false)
|
state := base.NewServerState()
|
||||||
|
state.Set(base.StateActive)
|
||||||
hb := newHeartbeater(heartbeaterParams{
|
hb := newHeartbeater(heartbeaterParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: testBroker,
|
broker: testBroker,
|
||||||
serverState: ss,
|
|
||||||
interval: time.Second,
|
interval: time.Second,
|
||||||
|
concurrency: 10,
|
||||||
|
queues: map[string]int{"default": 1},
|
||||||
|
strictPriority: false,
|
||||||
|
state: state,
|
||||||
|
starting: make(chan *workerInfo),
|
||||||
|
finished: make(chan *base.TaskMessage),
|
||||||
})
|
})
|
||||||
|
|
||||||
testBroker.Sleep()
|
testBroker.Sleep()
|
||||||
@@ -134,5 +152,5 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
|
|||||||
// wait for heartbeater to try writing data to redis
|
// wait for heartbeater to try writing data to redis
|
||||||
time.Sleep(2 * time.Second)
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
hb.terminate()
|
hb.shutdown()
|
||||||
}
|
}
|
||||||
|
|||||||
855
inspector.go
Normal file
855
inspector.go
Normal file
@@ -0,0 +1,855 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package asynq
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-redis/redis/v7"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/hibiken/asynq/internal/base"
|
||||||
|
"github.com/hibiken/asynq/internal/errors"
|
||||||
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Inspector is a client interface to inspect and mutate the state of
|
||||||
|
// queues and tasks.
|
||||||
|
type Inspector struct {
|
||||||
|
rdb *rdb.RDB
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new instance of Inspector.
|
||||||
|
func NewInspector(r RedisConnOpt) *Inspector {
|
||||||
|
c, ok := r.MakeRedisClient().(redis.UniversalClient)
|
||||||
|
if !ok {
|
||||||
|
panic(fmt.Sprintf("inspeq: unsupported RedisConnOpt type %T", r))
|
||||||
|
}
|
||||||
|
return &Inspector{
|
||||||
|
rdb: rdb.NewRDB(c),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the connection with redis.
|
||||||
|
func (i *Inspector) Close() error {
|
||||||
|
return i.rdb.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Queues returns a list of all queue names.
|
||||||
|
func (i *Inspector) Queues() ([]string, error) {
|
||||||
|
return i.rdb.AllQueues()
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueueInfo represents a state of queues at a certain time.
|
||||||
|
type QueueInfo struct {
|
||||||
|
// Name of the queue.
|
||||||
|
Queue string
|
||||||
|
|
||||||
|
// Total number of bytes that the queue and its tasks require to be stored in redis.
|
||||||
|
// It is an approximate memory usage value in bytes since the value is computed by sampling.
|
||||||
|
MemoryUsage int64
|
||||||
|
|
||||||
|
// Size is the total number of tasks in the queue.
|
||||||
|
// The value is the sum of Pending, Active, Scheduled, Retry, and Archived.
|
||||||
|
Size int
|
||||||
|
|
||||||
|
// Number of pending tasks.
|
||||||
|
Pending int
|
||||||
|
// Number of active tasks.
|
||||||
|
Active int
|
||||||
|
// Number of scheduled tasks.
|
||||||
|
Scheduled int
|
||||||
|
// Number of retry tasks.
|
||||||
|
Retry int
|
||||||
|
// Number of archived tasks.
|
||||||
|
Archived int
|
||||||
|
|
||||||
|
// Total number of tasks being processed during the given date.
|
||||||
|
// The number includes both succeeded and failed tasks.
|
||||||
|
Processed int
|
||||||
|
// Total number of tasks failed to be processed during the given date.
|
||||||
|
Failed int
|
||||||
|
|
||||||
|
// Paused indicates whether the queue is paused.
|
||||||
|
// If true, tasks in the queue will not be processed.
|
||||||
|
Paused bool
|
||||||
|
|
||||||
|
// Time when this queue info snapshot was taken.
|
||||||
|
Timestamp time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetQueueInfo returns current information of the given queue.
|
||||||
|
func (i *Inspector) GetQueueInfo(qname string) (*QueueInfo, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
stats, err := i.rdb.CurrentStats(qname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &QueueInfo{
|
||||||
|
Queue: stats.Queue,
|
||||||
|
MemoryUsage: stats.MemoryUsage,
|
||||||
|
Size: stats.Size,
|
||||||
|
Pending: stats.Pending,
|
||||||
|
Active: stats.Active,
|
||||||
|
Scheduled: stats.Scheduled,
|
||||||
|
Retry: stats.Retry,
|
||||||
|
Archived: stats.Archived,
|
||||||
|
Processed: stats.Processed,
|
||||||
|
Failed: stats.Failed,
|
||||||
|
Paused: stats.Paused,
|
||||||
|
Timestamp: stats.Timestamp,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
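A hedged usage sketch of the Inspector API defined in this file; the Redis address and queue name are placeholders.

inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: ":6379"})
defer inspector.Close()

info, err := inspector.GetQueueInfo("default")
if err != nil {
	log.Fatal(err)
}
fmt.Printf("queue=%s size=%d pending=%d active=%d paused=%t\n",
	info.Queue, info.Size, info.Pending, info.Active, info.Paused)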
|
|
||||||
|
// DailyStats holds aggregate data for a given day for a given queue.
|
||||||
|
type DailyStats struct {
|
||||||
|
// Name of the queue.
|
||||||
|
Queue string
|
||||||
|
// Total number of tasks being processed during the given date.
|
||||||
|
// The number includes both succeeded and failed tasks.
|
||||||
|
Processed int
|
||||||
|
// Total number of tasks failed to be processed during the given date.
|
||||||
|
Failed int
|
||||||
|
// Date this stats was taken.
|
||||||
|
Date time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// History returns a list of stats from the last n days.
|
||||||
|
func (i *Inspector) History(qname string, n int) ([]*DailyStats, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
stats, err := i.rdb.HistoricalStats(qname, n)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var res []*DailyStats
|
||||||
|
for _, s := range stats {
|
||||||
|
res = append(res, &DailyStats{
|
||||||
|
Queue: s.Queue,
|
||||||
|
Processed: s.Processed,
|
||||||
|
Failed: s.Failed,
|
||||||
|
Date: s.Time,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrQueueNotFound indicates that the specified queue does not exist.
|
||||||
|
ErrQueueNotFound = errors.New("queue not found")
|
||||||
|
|
||||||
|
// ErrQueueNotEmpty indicates that the specified queue is not empty.
|
||||||
|
ErrQueueNotEmpty = errors.New("queue is not empty")
|
||||||
|
|
||||||
|
// ErrTaskNotFound indicates that the specified task cannot be found in the queue.
|
||||||
|
ErrTaskNotFound = errors.New("task not found")
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeleteQueue removes the specified queue.
|
||||||
|
//
|
||||||
|
// If force is set to true, DeleteQueue will remove the queue regardless of
|
||||||
|
// the queue size as long as no tasks are active in the queue.
|
||||||
|
// If force is set to false, DeleteQueue will remove the queue only if
|
||||||
|
// the queue is empty.
|
||||||
|
//
|
||||||
|
// If the specified queue does not exist, DeleteQueue returns ErrQueueNotFound.
|
||||||
|
// If force is set to false and the specified queue is not empty, DeleteQueue
|
||||||
|
// returns ErrQueueNotEmpty.
|
||||||
|
func (i *Inspector) DeleteQueue(qname string, force bool) error {
|
||||||
|
err := i.rdb.RemoveQueue(qname, force)
|
||||||
|
if errors.IsQueueNotFound(err) {
|
||||||
|
return fmt.Errorf("%w: queue=%q", ErrQueueNotFound, qname)
|
||||||
|
}
|
||||||
|
if errors.IsQueueNotEmpty(err) {
|
||||||
|
return fmt.Errorf("%w: queue=%q", ErrQueueNotEmpty, qname)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTaskInfo retrieves task information given a task id and queue name.
|
||||||
|
//
|
||||||
|
// Returns ErrQueueNotFound if a queue with the given name doesn't exist.
|
||||||
|
// Returns ErrTaskNotFound if a task with the given id doesn't exist in the queue.
|
||||||
|
func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) {
|
||||||
|
taskid, err := uuid.Parse(id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("asynq: %s is not a valid task id", id)
|
||||||
|
}
|
||||||
|
info, err := i.rdb.GetTaskInfo(qname, taskid)
|
||||||
|
switch {
|
||||||
|
case errors.IsQueueNotFound(err):
|
||||||
|
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||||
|
case errors.IsTaskNotFound(err):
|
||||||
|
return nil, fmt.Errorf("asynq: %w", ErrTaskNotFound)
|
||||||
|
case err != nil:
|
||||||
|
return nil, fmt.Errorf("asynq: %v", err)
|
||||||
|
}
|
||||||
|
return newTaskInfo(info.Message, info.State, info.NextProcessAt), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListOption specifies behavior of list operation.
|
||||||
|
type ListOption interface{}
|
||||||
|
|
||||||
|
// Internal list option representations.
|
||||||
|
type (
|
||||||
|
pageSizeOpt int
|
||||||
|
pageNumOpt int
|
||||||
|
)
|
||||||
|
|
||||||
|
type listOption struct {
|
||||||
|
pageSize int
|
||||||
|
pageNum int
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Page size used by default in list operation.
|
||||||
|
defaultPageSize = 30
|
||||||
|
|
||||||
|
// Page number used by default in list operation.
|
||||||
|
defaultPageNum = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
func composeListOptions(opts ...ListOption) listOption {
|
||||||
|
res := listOption{
|
||||||
|
pageSize: defaultPageSize,
|
||||||
|
pageNum: defaultPageNum,
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
switch opt := opt.(type) {
|
||||||
|
case pageSizeOpt:
|
||||||
|
res.pageSize = int(opt)
|
||||||
|
case pageNumOpt:
|
||||||
|
res.pageNum = int(opt)
|
||||||
|
default:
|
||||||
|
// ignore unexpected option
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// PageSize returns an option to specify the page size for list operation.
|
||||||
|
//
|
||||||
|
// Negative page size is treated as zero.
|
||||||
|
func PageSize(n int) ListOption {
|
||||||
|
if n < 0 {
|
||||||
|
n = 0
|
||||||
|
}
|
||||||
|
return pageSizeOpt(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Page returns an option to specify the page number for list operation.
|
||||||
|
// The value 1 fetches the first page.
|
||||||
|
//
|
||||||
|
// Negative page number is treated as one.
|
||||||
|
func Page(n int) ListOption {
|
||||||
|
if n < 0 {
|
||||||
|
n = 1
|
||||||
|
}
|
||||||
|
return pageNumOpt(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListPendingTasks retrieves pending tasks from the specified queue.
|
||||||
|
//
|
||||||
|
// By default, it retrieves the first 30 tasks.
|
||||||
|
func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return nil, fmt.Errorf("asynq: %v", err)
|
||||||
|
}
|
||||||
|
opt := composeListOptions(opts...)
|
||||||
|
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
|
||||||
|
msgs, err := i.rdb.ListPending(qname, pgn)
|
||||||
|
switch {
|
||||||
|
case errors.IsQueueNotFound(err):
|
||||||
|
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||||
|
case err != nil:
|
||||||
|
return nil, fmt.Errorf("asynq: %v", err)
|
||||||
|
}
|
||||||
|
now := time.Now()
|
||||||
|
var tasks []*TaskInfo
|
||||||
|
for _, m := range msgs {
|
||||||
|
tasks = append(tasks, newTaskInfo(m, base.TaskStatePending, now))
|
||||||
|
}
|
||||||
|
return tasks, err
|
||||||
|
}
|
||||||
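Hedged sketch of paging through pending tasks with the list options above; the queue name and page values are placeholders, and inspector is the value from the earlier sketch.

tasks, err := inspector.ListPendingTasks("default", asynq.PageSize(50), asynq.Page(2))
if err != nil {
	log.Fatal(err)
}
fmt.Printf("fetched %d pending tasks on page 2\n", len(tasks))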
|
|
||||||
|
// ListActiveTasks retrieves active tasks from the specified queue.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return nil, fmt.Errorf("asynq: %v", err)
	}
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
	msgs, err := i.rdb.ListActive(qname, pgn)
	switch {
	case errors.IsQueueNotFound(err):
		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
	case err != nil:
		return nil, fmt.Errorf("asynq: %v", err)
	}
	var tasks []*TaskInfo
	for _, m := range msgs {
		tasks = append(tasks, newTaskInfo(m, base.TaskStateActive, time.Time{}))
	}
	return tasks, err
}

// ListScheduledTasks retrieves scheduled tasks from the specified queue.
// Tasks are sorted by NextProcessAt in ascending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return nil, fmt.Errorf("asynq: %v", err)
	}
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
	zs, err := i.rdb.ListScheduled(qname, pgn)
	switch {
	case errors.IsQueueNotFound(err):
		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
	case err != nil:
		return nil, fmt.Errorf("asynq: %v", err)
	}
	var tasks []*TaskInfo
	for _, z := range zs {
		tasks = append(tasks, newTaskInfo(
			z.Message,
			base.TaskStateScheduled,
			time.Unix(z.Score, 0),
		))
	}
	return tasks, nil
}

// ListRetryTasks retrieves retry tasks from the specified queue.
// Tasks are sorted by NextProcessAt in ascending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return nil, fmt.Errorf("asynq: %v", err)
	}
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
	zs, err := i.rdb.ListRetry(qname, pgn)
	switch {
	case errors.IsQueueNotFound(err):
		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
	case err != nil:
		return nil, fmt.Errorf("asynq: %v", err)
	}
	var tasks []*TaskInfo
	for _, z := range zs {
		tasks = append(tasks, newTaskInfo(
			z.Message,
			base.TaskStateRetry,
			time.Unix(z.Score, 0),
		))
	}
	return tasks, nil
}

// ListArchivedTasks retrieves archived tasks from the specified queue.
// Tasks are sorted by LastFailedAt in descending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return nil, fmt.Errorf("asynq: %v", err)
	}
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
	zs, err := i.rdb.ListArchived(qname, pgn)
	switch {
	case errors.IsQueueNotFound(err):
		return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
	case err != nil:
		return nil, fmt.Errorf("asynq: %v", err)
	}
	var tasks []*TaskInfo
	for _, z := range zs {
		tasks = append(tasks, newTaskInfo(
			z.Message,
			base.TaskStateArchived,
			time.Time{},
		))
	}
	return tasks, nil
}

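// A sketch of walking every page of archived tasks (illustrative only;
// inspector and the "default" queue name are assumptions):
//
//	for page := 1; ; page++ {
//		tasks, err := inspector.ListArchivedTasks("default", Page(page))
//		if err != nil {
//			log.Fatal(err)
//		}
//		if len(tasks) == 0 {
//			break
//		}
//		// inspect or delete the returned tasks here...
//	}
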
// DeleteAllPendingTasks deletes all pending tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllPendingTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.DeleteAllPendingTasks(qname)
	return int(n), err
}

// DeleteAllScheduledTasks deletes all scheduled tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllScheduledTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.DeleteAllScheduledTasks(qname)
	return int(n), err
}

// DeleteAllRetryTasks deletes all retry tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllRetryTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.DeleteAllRetryTasks(qname)
	return int(n), err
}

// DeleteAllArchivedTasks deletes all archived tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.DeleteAllArchivedTasks(qname)
	return int(n), err
}

// DeleteTask deletes a task with the given id from the given queue.
// The task needs to be in pending, scheduled, retry, or archived state,
// otherwise DeleteTask will return an error.
//
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
// If the task is in active state, it returns a non-nil error.
func (i *Inspector) DeleteTask(qname, id string) error {
	if err := base.ValidateQueueName(qname); err != nil {
		return fmt.Errorf("asynq: %v", err)
	}
	taskid, err := uuid.Parse(id)
	if err != nil {
		return fmt.Errorf("asynq: %s is not a valid task id", id)
	}
	err = i.rdb.DeleteTask(qname, taskid)
	switch {
	case errors.IsQueueNotFound(err):
		return fmt.Errorf("asynq: %w", ErrQueueNotFound)
	case errors.IsTaskNotFound(err):
		return fmt.Errorf("asynq: %w", ErrTaskNotFound)
	case err != nil:
		return fmt.Errorf("asynq: %v", err)
	}
	return nil
}

// RunAllScheduledTasks transitions all scheduled tasks to pending state from the given queue,
// and reports the number of tasks transitioned.
func (i *Inspector) RunAllScheduledTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.RunAllScheduledTasks(qname)
	return int(n), err
}

// RunAllRetryTasks transitions all retry tasks to pending state from the given queue,
// and reports the number of tasks transitioned.
func (i *Inspector) RunAllRetryTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.RunAllRetryTasks(qname)
	return int(n), err
}

// RunAllArchivedTasks transitions all archived tasks to pending state from the given queue,
// and reports the number of tasks transitioned.
func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.RunAllArchivedTasks(qname)
	return int(n), err
}

// RunTask updates the task to pending state given a queue name and task id.
// The task needs to be in scheduled, retry, or archived state, otherwise RunTask
// will return an error.
//
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
// If the task is in pending or active state, it returns a non-nil error.
func (i *Inspector) RunTask(qname, id string) error {
	if err := base.ValidateQueueName(qname); err != nil {
		return fmt.Errorf("asynq: %v", err)
	}
	taskid, err := uuid.Parse(id)
	if err != nil {
		return fmt.Errorf("asynq: %s is not a valid task id", id)
	}
	err = i.rdb.RunTask(qname, taskid)
	switch {
	case errors.IsQueueNotFound(err):
		return fmt.Errorf("asynq: %w", ErrQueueNotFound)
	case errors.IsTaskNotFound(err):
		return fmt.Errorf("asynq: %w", ErrTaskNotFound)
	case err != nil:
		return fmt.Errorf("asynq: %v", err)
	}
	return nil
}

// ArchiveAllPendingTasks archives all pending tasks from the given queue,
// and reports the number of tasks archived.
func (i *Inspector) ArchiveAllPendingTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.ArchiveAllPendingTasks(qname)
	return int(n), err
}

// ArchiveAllScheduledTasks archives all scheduled tasks from the given queue,
// and reports the number of tasks archived.
func (i *Inspector) ArchiveAllScheduledTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.ArchiveAllScheduledTasks(qname)
	return int(n), err
}

// ArchiveAllRetryTasks archives all retry tasks from the given queue,
// and reports the number of tasks archived.
func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
	if err := base.ValidateQueueName(qname); err != nil {
		return 0, err
	}
	n, err := i.rdb.ArchiveAllRetryTasks(qname)
	return int(n), err
}

// ArchiveTask archives a task with the given id in the given queue.
// The task needs to be in pending, scheduled, or retry state, otherwise ArchiveTask
// will return an error.
//
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
// If the task is already archived, it returns a non-nil error.
func (i *Inspector) ArchiveTask(qname, id string) error {
	if err := base.ValidateQueueName(qname); err != nil {
		return fmt.Errorf("asynq: %v", err)
	}
	taskid, err := uuid.Parse(id)
	if err != nil {
		return fmt.Errorf("asynq: %s is not a valid task id", id)
	}
	err = i.rdb.ArchiveTask(qname, taskid)
	switch {
	case errors.IsQueueNotFound(err):
		return fmt.Errorf("asynq: %w", ErrQueueNotFound)
	case errors.IsTaskNotFound(err):
		return fmt.Errorf("asynq: %w", ErrTaskNotFound)
	case err != nil:
		return fmt.Errorf("asynq: %v", err)
	}
	return nil
}

// CancelProcessing sends a signal to cancel processing of the task
// given a task id. CancelProcessing is best-effort, which means that it does not
// guarantee that the task with the given id will be canceled. The return
// value only indicates whether the cancelation signal has been sent.
func (i *Inspector) CancelProcessing(id string) error {
	return i.rdb.PublishCancelation(id)
}

// PauseQueue pauses task processing on the specified queue.
// If the queue is already paused, it will return a non-nil error.
func (i *Inspector) PauseQueue(qname string) error {
	if err := base.ValidateQueueName(qname); err != nil {
		return err
	}
	return i.rdb.Pause(qname)
}

// UnpauseQueue resumes task processing on the specified queue.
// If the queue is not paused, it will return a non-nil error.
func (i *Inspector) UnpauseQueue(qname string) error {
	if err := base.ValidateQueueName(qname); err != nil {
		return err
	}
	return i.rdb.Unpause(qname)
}

// Servers returns a list of running servers' information.
func (i *Inspector) Servers() ([]*ServerInfo, error) {
	servers, err := i.rdb.ListServers()
	if err != nil {
		return nil, err
	}
	workers, err := i.rdb.ListWorkers()
	if err != nil {
		return nil, err
	}
	m := make(map[string]*ServerInfo) // ServerInfo keyed by serverID
	for _, s := range servers {
		m[s.ServerID] = &ServerInfo{
			ID:             s.ServerID,
			Host:           s.Host,
			PID:            s.PID,
			Concurrency:    s.Concurrency,
			Queues:         s.Queues,
			StrictPriority: s.StrictPriority,
			Started:        s.Started,
			Status:         s.Status,
			ActiveWorkers:  make([]*WorkerInfo, 0),
		}
	}
	for _, w := range workers {
		srvInfo, ok := m[w.ServerID]
		if !ok {
			continue
		}
		wrkInfo := &WorkerInfo{
			TaskID:      w.ID,
			TaskType:    w.Type,
			TaskPayload: w.Payload,
			Queue:       w.Queue,
			Started:     w.Started,
			Deadline:    w.Deadline,
		}
		srvInfo.ActiveWorkers = append(srvInfo.ActiveWorkers, wrkInfo)
	}
	var out []*ServerInfo
	for _, srvInfo := range m {
		out = append(out, srvInfo)
	}
	return out, nil
}

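// A hedged sketch combining Servers and CancelProcessing to cancel everything
// currently being worked on for one queue (illustrative only; inspector and
// the "critical" queue name are assumptions):
//
//	srvs, err := inspector.Servers()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, srv := range srvs {
//		for _, w := range srv.ActiveWorkers {
//			if w.Queue == "critical" {
//				_ = inspector.CancelProcessing(w.TaskID)
//			}
//		}
//	}
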
// ServerInfo describes a running Server instance.
type ServerInfo struct {
	// Unique Identifier for the server.
	ID string
	// Host machine on which the server is running.
	Host string
	// PID of the process in which the server is running.
	PID int

	// Server configuration details.
	// See Config doc for field descriptions.
	Concurrency    int
	Queues         map[string]int
	StrictPriority bool

	// Time the server started.
	Started time.Time
	// Status indicates the status of the server.
	// TODO: Update comment with more details.
	Status string
	// A list of active workers currently processing tasks.
	ActiveWorkers []*WorkerInfo
}

// WorkerInfo describes a running worker processing a task.
type WorkerInfo struct {
	// ID of the task the worker is processing.
	TaskID string
	// Type of the task the worker is processing.
	TaskType string
	// Payload of the task the worker is processing.
	TaskPayload []byte
	// Queue from which the worker got its task.
	Queue string
	// Time the worker started processing the task.
	Started time.Time
	// Time the worker needs to finish processing the task by.
	Deadline time.Time
}

// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
func (i *Inspector) ClusterKeySlot(qname string) (int64, error) {
	return i.rdb.ClusterKeySlot(qname)
}

// ClusterNode describes a node in redis cluster.
type ClusterNode struct {
	// Node ID in the cluster.
	ID string

	// Address of the node.
	Addr string
}

// ClusterNodes returns a list of nodes the given queue belongs to.
//
// Only relevant if task queues are stored in redis cluster.
func (i *Inspector) ClusterNodes(qname string) ([]*ClusterNode, error) {
	nodes, err := i.rdb.ClusterNodes(qname)
	if err != nil {
		return nil, err
	}
	var res []*ClusterNode
	for _, node := range nodes {
		res = append(res, &ClusterNode{ID: node.ID, Addr: node.Addr})
	}
	return res, nil
}

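// A short sketch of the cluster helpers above (illustrative only; assumes an
// inspector backed by a Redis Cluster and a queue named "default"):
//
//	slot, err := inspector.ClusterKeySlot("default")
//	if err != nil {
//		log.Fatal(err)
//	}
//	nodes, err := inspector.ClusterNodes("default")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("queue %q hashes to slot %d, served by %d node(s)\n", "default", slot, len(nodes))
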
// SchedulerEntry holds information about a periodic task registered with a scheduler.
type SchedulerEntry struct {
	// Identifier of this entry.
	ID string

	// Spec describes the schedule of this entry.
	Spec string

	// Periodic Task registered for this entry.
	Task *Task

	// Opts is the options for the periodic task.
	Opts []Option

	// Next shows the next time the task will be enqueued.
	Next time.Time

	// Prev shows the last time the task was enqueued.
	// Zero time if task was never enqueued.
	Prev time.Time
}

// SchedulerEntries returns a list of all entries registered with
// currently running schedulers.
func (i *Inspector) SchedulerEntries() ([]*SchedulerEntry, error) {
	var entries []*SchedulerEntry
	res, err := i.rdb.ListSchedulerEntries()
	if err != nil {
		return nil, err
	}
	for _, e := range res {
		task := NewTask(e.Type, e.Payload)
		var opts []Option
		for _, s := range e.Opts {
			if o, err := parseOption(s); err == nil {
				// ignore bad data
				opts = append(opts, o)
			}
		}
		entries = append(entries, &SchedulerEntry{
			ID:   e.ID,
			Spec: e.Spec,
			Task: task,
			Opts: opts,
			Next: e.Next,
			Prev: e.Prev,
		})
	}
	return entries, nil
}

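// A usage sketch for SchedulerEntries (illustrative only; inspector is an
// assumed *Inspector value):
//
//	entries, err := inspector.SchedulerEntries()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, e := range entries {
//		fmt.Printf("entry %s: spec=%q next=%v prev=%v\n", e.ID, e.Spec, e.Next, e.Prev)
//	}
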
// parseOption interprets a string s as an Option and returns the Option if parsing is successful,
// otherwise returns non-nil error.
func parseOption(s string) (Option, error) {
	fn, arg := parseOptionFunc(s), parseOptionArg(s)
	switch fn {
	case "Queue":
		qname, err := strconv.Unquote(arg)
		if err != nil {
			return nil, err
		}
		return Queue(qname), nil
	case "MaxRetry":
		n, err := strconv.Atoi(arg)
		if err != nil {
			return nil, err
		}
		return MaxRetry(n), nil
	case "Timeout":
		d, err := time.ParseDuration(arg)
		if err != nil {
			return nil, err
		}
		return Timeout(d), nil
	case "Deadline":
		t, err := time.Parse(time.UnixDate, arg)
		if err != nil {
			return nil, err
		}
		return Deadline(t), nil
	case "Unique":
		d, err := time.ParseDuration(arg)
		if err != nil {
			return nil, err
		}
		return Unique(d), nil
	case "ProcessAt":
		t, err := time.Parse(time.UnixDate, arg)
		if err != nil {
			return nil, err
		}
		return ProcessAt(t), nil
	case "ProcessIn":
		d, err := time.ParseDuration(arg)
		if err != nil {
			return nil, err
		}
		return ProcessIn(d), nil
	default:
		return nil, fmt.Errorf("cannot parse option string %q", s)
	}
}

func parseOptionFunc(s string) string {
	i := strings.Index(s, "(")
	return s[:i]
}

func parseOptionArg(s string) string {
	i := strings.Index(s, "(")
	if i >= 0 {
		j := strings.Index(s, ")")
		if j > i {
			return s[i+1 : j]
		}
	}
	return ""
}

// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
type SchedulerEnqueueEvent struct {
	// ID of the task that was enqueued.
	TaskID string

	// Time the task was enqueued.
	EnqueuedAt time.Time
}

// ListSchedulerEnqueueEvents retrieves a list of enqueue events from the specified scheduler entry.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListSchedulerEnqueueEvents(entryID string, opts ...ListOption) ([]*SchedulerEnqueueEvent, error) {
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
	data, err := i.rdb.ListSchedulerEnqueueEvents(entryID, pgn)
	if err != nil {
		return nil, err
	}
	var events []*SchedulerEnqueueEvent
	for _, e := range data {
		events = append(events, &SchedulerEnqueueEvent{TaskID: e.TaskID, EnqueuedAt: e.EnqueuedAt})
	}
	return events, nil
}

inspector_test.go (3088 lines, new file): diff suppressed because it is too large.

@@ -7,20 +7,24 @@ package asynqtest
 import (
 	"encoding/json"
+	"math"
 	"sort"
 	"testing"
+	"time"
 
 	"github.com/go-redis/redis/v7"
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/google/uuid"
 	"github.com/hibiken/asynq/internal/base"
-	"github.com/rs/xid"
 )
 
-// ZSetEntry is an entry in redis sorted set.
-type ZSetEntry struct {
-	Msg   *base.TaskMessage
-	Score float64
+// EquateInt64Approx returns a Comparer option that treats int64 values
+// to be equal if they are within the given margin.
+func EquateInt64Approx(margin int64) cmp.Option {
+	return cmp.Comparer(func(a, b int64) bool {
+		return math.Abs(float64(a-b)) <= float64(margin)
+	})
 }
 
 // SortMsgOpt is a cmp.Option to sort base.TaskMessage for comparing slice of task messages.
@@ -33,10 +37,10 @@ var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage
 })
 
 // SortZSetEntryOpt is an cmp.Option to sort ZSetEntry for comparing slice of zset entries.
-var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []ZSetEntry) []ZSetEntry {
-	out := append([]ZSetEntry(nil), in...) // Copy input to avoid mutating it
+var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []base.Z) []base.Z {
+	out := append([]base.Z(nil), in...) // Copy input to avoid mutating it
 	sort.Slice(out, func(i, j int) bool {
-		return out[i].Msg.ID.String() < out[j].Msg.ID.String()
+		return out[i].Message.ID.String() < out[j].Message.ID.String()
 	})
 	return out
 })
@@ -57,7 +61,25 @@ var SortServerInfoOpt = cmp.Transformer("SortServerInfo", func(in []*base.Server
 var SortWorkerInfoOpt = cmp.Transformer("SortWorkerInfo", func(in []*base.WorkerInfo) []*base.WorkerInfo {
 	out := append([]*base.WorkerInfo(nil), in...) // Copy input to avoid mutating it
 	sort.Slice(out, func(i, j int) bool {
-		return out[i].ID.String() < out[j].ID.String()
+		return out[i].ID < out[j].ID
+	})
+	return out
+})
+
+// SortSchedulerEntryOpt is a cmp.Option to sort base.SchedulerEntry for comparing slice of entries.
+var SortSchedulerEntryOpt = cmp.Transformer("SortSchedulerEntry", func(in []*base.SchedulerEntry) []*base.SchedulerEntry {
+	out := append([]*base.SchedulerEntry(nil), in...) // Copy input to avoid mutating it
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].Spec < out[j].Spec
+	})
+	return out
+})
+
+// SortSchedulerEnqueueEventOpt is a cmp.Option to sort base.SchedulerEnqueueEvent for comparing slice of events.
+var SortSchedulerEnqueueEventOpt = cmp.Transformer("SortSchedulerEnqueueEvent", func(in []*base.SchedulerEnqueueEvent) []*base.SchedulerEnqueueEvent {
+	out := append([]*base.SchedulerEnqueueEvent(nil), in...)
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].EnqueuedAt.Unix() < out[j].EnqueuedAt.Unix()
 	})
 	return out
 })
@@ -73,33 +95,54 @@ var SortStringSliceOpt = cmp.Transformer("SortStringSlice", func(in []string) []
 var IgnoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID")
 
 // NewTaskMessage returns a new instance of TaskMessage given a task type and payload.
-func NewTaskMessage(taskType string, payload map[string]interface{}) *base.TaskMessage {
-	return &base.TaskMessage{
-		ID:      xid.New(),
-		Type:    taskType,
-		Queue:   base.DefaultQueueName,
-		Retry:   25,
-		Payload: payload,
-	}
+func NewTaskMessage(taskType string, payload []byte) *base.TaskMessage {
+	return NewTaskMessageWithQueue(taskType, payload, base.DefaultQueueName)
 }
 
 // NewTaskMessageWithQueue returns a new instance of TaskMessage given a
 // task type, payload and queue name.
-func NewTaskMessageWithQueue(taskType string, payload map[string]interface{}, qname string) *base.TaskMessage {
+func NewTaskMessageWithQueue(taskType string, payload []byte, qname string) *base.TaskMessage {
 	return &base.TaskMessage{
-		ID:      xid.New(),
+		ID:       uuid.New(),
 		Type:     taskType,
 		Queue:    qname,
 		Retry:    25,
 		Payload:  payload,
+		Timeout:  1800, // default timeout of 30 mins
+		Deadline: 0,    // no deadline
 	}
 }
 
+// JSON serializes the given key-value pairs into stream of bytes in JSON.
+func JSON(kv map[string]interface{}) []byte {
+	b, err := json.Marshal(kv)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+// TaskMessageAfterRetry returns an updated copy of t after retry.
+// It increments retry count and sets the error message and last_failed_at time.
+func TaskMessageAfterRetry(t base.TaskMessage, errMsg string, failedAt time.Time) *base.TaskMessage {
+	t.Retried = t.Retried + 1
+	t.ErrorMsg = errMsg
+	t.LastFailedAt = failedAt.Unix()
+	return &t
+}
+
+// TaskMessageWithError returns an updated copy of t with the given error message.
+func TaskMessageWithError(t base.TaskMessage, errMsg string, failedAt time.Time) *base.TaskMessage {
+	t.ErrorMsg = errMsg
+	t.LastFailedAt = failedAt.Unix()
+	return &t
+}
+
 // MustMarshal marshals given task message and returns a json string.
 // Calling test will fail if marshaling errors out.
 func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
 	tb.Helper()
-	data, err := json.Marshal(msg)
+	data, err := base.EncodeMessage(msg)
 	if err != nil {
 		tb.Fatal(err)
 	}
@@ -110,171 +153,295 @@ func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
 // Calling test will fail if unmarshaling errors out.
 func MustUnmarshal(tb testing.TB, data string) *base.TaskMessage {
 	tb.Helper()
-	var msg base.TaskMessage
-	err := json.Unmarshal([]byte(data), &msg)
+	msg, err := base.DecodeMessage([]byte(data))
 	if err != nil {
 		tb.Fatal(err)
 	}
-	return &msg
+	return msg
 }
 
-// MustMarshalSlice marshals a slice of task messages and return a slice of
-// json strings. Calling test will fail if marshaling errors out.
-func MustMarshalSlice(tb testing.TB, msgs []*base.TaskMessage) []string {
+// FlushDB deletes all the keys of the currently selected DB.
+func FlushDB(tb testing.TB, r redis.UniversalClient) {
 	tb.Helper()
-	var data []string
-	for _, m := range msgs {
-		data = append(data, MustMarshal(tb, m))
+	switch r := r.(type) {
+	case *redis.Client:
+		if err := r.FlushDB().Err(); err != nil {
+			tb.Fatal(err)
+		}
+	case *redis.ClusterClient:
+		err := r.ForEachMaster(func(c *redis.Client) error {
+			if err := c.FlushAll().Err(); err != nil {
+				return err
+			}
+			return nil
+		})
+		if err != nil {
+			tb.Fatal(err)
+		}
 	}
-	return data
 }
 
-// MustUnmarshalSlice unmarshals a slice of strings into a slice of task message structs.
-// Calling test will fail if marshaling errors out.
-func MustUnmarshalSlice(tb testing.TB, data []string) []*base.TaskMessage {
+// SeedPendingQueue initializes the specified queue with the given messages.
+func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
 	tb.Helper()
+	r.SAdd(base.AllQueues, qname)
+	seedRedisList(tb, r, base.PendingKey(qname), msgs, base.TaskStatePending)
+}
+
+// SeedActiveQueue initializes the active queue with the given messages.
+func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
+	tb.Helper()
+	r.SAdd(base.AllQueues, qname)
+	seedRedisList(tb, r, base.ActiveKey(qname), msgs, base.TaskStateActive)
+}
+
+// SeedScheduledQueue initializes the scheduled queue with the given messages.
+func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+	tb.Helper()
+	r.SAdd(base.AllQueues, qname)
+	seedRedisZSet(tb, r, base.ScheduledKey(qname), entries, base.TaskStateScheduled)
+}
+
+// SeedRetryQueue initializes the retry queue with the given messages.
+func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+	tb.Helper()
+	r.SAdd(base.AllQueues, qname)
+	seedRedisZSet(tb, r, base.RetryKey(qname), entries, base.TaskStateRetry)
+}
+
+// SeedArchivedQueue initializes the archived queue with the given messages.
+func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+	tb.Helper()
+	r.SAdd(base.AllQueues, qname)
+	seedRedisZSet(tb, r, base.ArchivedKey(qname), entries, base.TaskStateArchived)
+}
+
+// SeedDeadlines initializes the deadlines set with the given entries.
+func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+	tb.Helper()
+	r.SAdd(base.AllQueues, qname)
+	seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries, base.TaskStateActive)
+}
+
+// SeedAllPendingQueues initializes all of the specified queues with the given messages.
+//
+// pending maps a queue name to a list of messages.
+func SeedAllPendingQueues(tb testing.TB, r redis.UniversalClient, pending map[string][]*base.TaskMessage) {
+	tb.Helper()
+	for q, msgs := range pending {
+		SeedPendingQueue(tb, r, msgs, q)
+	}
+}
+
+// SeedAllActiveQueues initializes all of the specified active queues with the given messages.
+func SeedAllActiveQueues(tb testing.TB, r redis.UniversalClient, active map[string][]*base.TaskMessage) {
+	tb.Helper()
+	for q, msgs := range active {
+		SeedActiveQueue(tb, r, msgs, q)
+	}
+}
+
+// SeedAllScheduledQueues initializes all of the specified scheduled queues with the given entries.
+func SeedAllScheduledQueues(tb testing.TB, r redis.UniversalClient, scheduled map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range scheduled {
+		SeedScheduledQueue(tb, r, entries, q)
+	}
+}
+
+// SeedAllRetryQueues initializes all of the specified retry queues with the given entries.
+func SeedAllRetryQueues(tb testing.TB, r redis.UniversalClient, retry map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range retry {
+		SeedRetryQueue(tb, r, entries, q)
+	}
+}
+
+// SeedAllArchivedQueues initializes all of the specified archived queues with the given entries.
+func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range archived {
+		SeedArchivedQueue(tb, r, entries, q)
+	}
+}
+
+// SeedAllDeadlines initializes all of the deadlines with the given entries.
+func SeedAllDeadlines(tb testing.TB, r redis.UniversalClient, deadlines map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range deadlines {
+		SeedDeadlines(tb, r, entries, q)
+	}
+}
+
+func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
+	msgs []*base.TaskMessage, state base.TaskState) {
+	tb.Helper()
+	for _, msg := range msgs {
+		encoded := MustMarshal(tb, msg)
+		if err := c.LPush(key, msg.ID.String()).Err(); err != nil {
+			tb.Fatal(err)
+		}
+		key := base.TaskKey(msg.Queue, msg.ID.String())
+		data := map[string]interface{}{
+			"msg":        encoded,
+			"state":      state.String(),
+			"timeout":    msg.Timeout,
+			"deadline":   msg.Deadline,
+			"unique_key": msg.UniqueKey,
+		}
+		if err := c.HSet(key, data).Err(); err != nil {
+			tb.Fatal(err)
+		}
+		if len(msg.UniqueKey) > 0 {
+			err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
+			if err != nil {
+				tb.Fatalf("Failed to set unique lock in redis: %v", err)
+			}
+		}
+	}
+}
+
+func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
+	items []base.Z, state base.TaskState) {
+	tb.Helper()
+	for _, item := range items {
+		msg := item.Message
+		encoded := MustMarshal(tb, msg)
+		z := &redis.Z{Member: msg.ID.String(), Score: float64(item.Score)}
+		if err := c.ZAdd(key, z).Err(); err != nil {
+			tb.Fatal(err)
+		}
+		key := base.TaskKey(msg.Queue, msg.ID.String())
+		data := map[string]interface{}{
+			"msg":        encoded,
+			"state":      state.String(),
+			"timeout":    msg.Timeout,
+			"deadline":   msg.Deadline,
+			"unique_key": msg.UniqueKey,
+		}
+		if err := c.HSet(key, data).Err(); err != nil {
+			tb.Fatal(err)
+		}
+		if len(msg.UniqueKey) > 0 {
+			err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
+			if err != nil {
+				tb.Fatalf("Failed to set unique lock in redis: %v", err)
+			}
+		}
+	}
+}
+
+// GetPendingMessages returns all pending messages in the given queue.
+// It also asserts the state field of the task.
+func GetPendingMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromList(tb, r, qname, base.PendingKey, base.TaskStatePending)
+}
+
+// GetActiveMessages returns all active messages in the given queue.
+// It also asserts the state field of the task.
+func GetActiveMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromList(tb, r, qname, base.ActiveKey, base.TaskStateActive)
+}
+
+// GetScheduledMessages returns all scheduled task messages in the given queue.
+// It also asserts the state field of the task.
+func GetScheduledMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromZSet(tb, r, qname, base.ScheduledKey, base.TaskStateScheduled)
+}
+
+// GetRetryMessages returns all retry messages in the given queue.
+// It also asserts the state field of the task.
+func GetRetryMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromZSet(tb, r, qname, base.RetryKey, base.TaskStateRetry)
+}
+
+// GetArchivedMessages returns all archived messages in the given queue.
+// It also asserts the state field of the task.
+func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromZSet(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
+}
+
+// GetScheduledEntries returns all scheduled messages and its score in the given queue.
+// It also asserts the state field of the task.
+func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+	tb.Helper()
+	return getMessagesFromZSetWithScores(tb, r, qname, base.ScheduledKey, base.TaskStateScheduled)
+}
+
+// GetRetryEntries returns all retry messages and its score in the given queue.
+// It also asserts the state field of the task.
+func GetRetryEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+	tb.Helper()
+	return getMessagesFromZSetWithScores(tb, r, qname, base.RetryKey, base.TaskStateRetry)
+}
+
+// GetArchivedEntries returns all archived messages and its score in the given queue.
+// It also asserts the state field of the task.
+func GetArchivedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+	tb.Helper()
+	return getMessagesFromZSetWithScores(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
+}
+
+// GetDeadlinesEntries returns all task messages and its score in the deadlines set for the given queue.
+// It also asserts the state field of the task.
+func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+	tb.Helper()
+	return getMessagesFromZSetWithScores(tb, r, qname, base.DeadlinesKey, base.TaskStateActive)
+}
+
|
||||||
|
func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
|
||||||
|
keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
|
||||||
|
tb.Helper()
|
||||||
|
ids := r.LRange(keyFn(qname), 0, -1).Val()
|
||||||
var msgs []*base.TaskMessage
|
var msgs []*base.TaskMessage
|
||||||
for _, s := range data {
|
for _, id := range ids {
|
||||||
msgs = append(msgs, MustUnmarshal(tb, s))
|
taskKey := base.TaskKey(qname, id)
|
||||||
|
data := r.HGet(taskKey, "msg").Val()
|
||||||
|
msgs = append(msgs, MustUnmarshal(tb, data))
|
||||||
|
if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
|
||||||
|
tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return msgs
|
return msgs
|
||||||
}
|
}
|
||||||
|
|
||||||
// FlushDB deletes all the keys of the currently selected DB.
|
// Retrieves all messages stored under `keyFn(qname)` key in redis zset (sorted-set).
|
||||||
func FlushDB(tb testing.TB, r *redis.Client) {
|
func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
|
||||||
|
keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
|
||||||
tb.Helper()
|
tb.Helper()
|
||||||
if err := r.FlushDB().Err(); err != nil {
|
ids := r.ZRange(keyFn(qname), 0, -1).Val()
|
||||||
tb.Fatal(err)
|
var msgs []*base.TaskMessage
|
||||||
|
for _, id := range ids {
|
||||||
|
taskKey := base.TaskKey(qname, id)
|
||||||
|
msg := r.HGet(taskKey, "msg").Val()
|
||||||
|
msgs = append(msgs, MustUnmarshal(tb, msg))
|
||||||
|
if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
|
||||||
|
tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return msgs
|
||||||
|
}
|
||||||
|
|
||||||
-// SeedEnqueuedQueue initializes the specified queue with the given messages.
-//
-// If queue name option is not passed, it defaults to the default queue.
-func SeedEnqueuedQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage, queueOpt ...string) {
+// Retrieves all messages along with their scores stored under `keyFn(qname)` key in redis zset (sorted-set).
+func getMessagesFromZSetWithScores(tb testing.TB, r redis.UniversalClient,
+	qname string, keyFn func(qname string) string, state base.TaskState) []base.Z {
 	tb.Helper()
-	queue := base.DefaultQueue
-	if len(queueOpt) > 0 {
-		queue = base.QueueKey(queueOpt[0])
-	}
-	r.SAdd(base.AllQueues, queue)
-	seedRedisList(tb, r, queue, msgs)
-}
-
-// SeedInProgressQueue initializes the in-progress queue with the given messages.
-func SeedInProgressQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage) {
-	tb.Helper()
-	seedRedisList(tb, r, base.InProgressQueue, msgs)
-}
-
-// SeedScheduledQueue initializes the scheduled queue with the given messages.
-func SeedScheduledQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
-	tb.Helper()
-	seedRedisZSet(tb, r, base.ScheduledQueue, entries)
-}
-
-// SeedRetryQueue initializes the retry queue with the given messages.
-func SeedRetryQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
-	tb.Helper()
-	seedRedisZSet(tb, r, base.RetryQueue, entries)
-}
-
-// SeedDeadQueue initializes the dead queue with the given messages.
-func SeedDeadQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
-	tb.Helper()
-	seedRedisZSet(tb, r, base.DeadQueue, entries)
-}
-
-func seedRedisList(tb testing.TB, c *redis.Client, key string, msgs []*base.TaskMessage) {
-	data := MustMarshalSlice(tb, msgs)
-	for _, s := range data {
-		if err := c.LPush(key, s).Err(); err != nil {
-			tb.Fatal(err)
-		}
-	}
-}
-
-func seedRedisZSet(tb testing.TB, c *redis.Client, key string, items []ZSetEntry) {
-	for _, item := range items {
-		z := &redis.Z{Member: MustMarshal(tb, item.Msg), Score: float64(item.Score)}
-		if err := c.ZAdd(key, z).Err(); err != nil {
-			tb.Fatal(err)
-		}
-	}
-}
-
-// GetEnqueuedMessages returns all task messages in the specified queue.
-//
-// If queue name option is not passed, it defaults to the default queue.
-func GetEnqueuedMessages(tb testing.TB, r *redis.Client, queueOpt ...string) []*base.TaskMessage {
-	tb.Helper()
-	queue := base.DefaultQueue
-	if len(queueOpt) > 0 {
-		queue = base.QueueKey(queueOpt[0])
-	}
-	return getListMessages(tb, r, queue)
-}
-
-// GetInProgressMessages returns all task messages in the in-progress queue.
-func GetInProgressMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
-	tb.Helper()
-	return getListMessages(tb, r, base.InProgressQueue)
-}
-
-// GetScheduledMessages returns all task messages in the scheduled queue.
-func GetScheduledMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
-	tb.Helper()
-	return getZSetMessages(tb, r, base.ScheduledQueue)
-}
-
-// GetRetryMessages returns all task messages in the retry queue.
-func GetRetryMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
-	tb.Helper()
-	return getZSetMessages(tb, r, base.RetryQueue)
-}
-
-// GetDeadMessages returns all task messages in the dead queue.
-func GetDeadMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
-	tb.Helper()
-	return getZSetMessages(tb, r, base.DeadQueue)
-}
-
-// GetScheduledEntries returns all task messages and its score in the scheduled queue.
-func GetScheduledEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
-	tb.Helper()
-	return getZSetEntries(tb, r, base.ScheduledQueue)
-}
-
-// GetRetryEntries returns all task messages and its score in the retry queue.
-func GetRetryEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
-	tb.Helper()
-	return getZSetEntries(tb, r, base.RetryQueue)
-}
-
-// GetDeadEntries returns all task messages and its score in the dead queue.
-func GetDeadEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
-	tb.Helper()
-	return getZSetEntries(tb, r, base.DeadQueue)
-}
-
-func getListMessages(tb testing.TB, r *redis.Client, list string) []*base.TaskMessage {
-	data := r.LRange(list, 0, -1).Val()
-	return MustUnmarshalSlice(tb, data)
-}
-
-func getZSetMessages(tb testing.TB, r *redis.Client, zset string) []*base.TaskMessage {
-	data := r.ZRange(zset, 0, -1).Val()
-	return MustUnmarshalSlice(tb, data)
-}
-
-func getZSetEntries(tb testing.TB, r *redis.Client, zset string) []ZSetEntry {
-	data := r.ZRangeWithScores(zset, 0, -1).Val()
-	var entries []ZSetEntry
-	for _, z := range data {
-		entries = append(entries, ZSetEntry{
-			Msg:   MustUnmarshal(tb, z.Member.(string)),
-			Score: z.Score,
-		})
-	}
-	return entries
+	zs := r.ZRangeWithScores(keyFn(qname), 0, -1).Val()
+	var res []base.Z
+	for _, z := range zs {
+		taskID := z.Member.(string)
+		taskKey := base.TaskKey(qname, taskID)
+		msg := r.HGet(taskKey, "msg").Val()
+		res = append(res, base.Z{Message: MustUnmarshal(tb, msg), Score: int64(z.Score)})
+		if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
+			tb.Errorf("task (id=%q) is in %q state, want %v", taskID, gotState, state)
+		}
+	}
+	return res
 }
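A rough sketch of how the seeding and inspection helpers added above might be used from a test (illustrative only: the setup helper, package qualifiers, queue name, and payload are assumptions, not part of the change):

func TestSeedAndInspect(t *testing.T) {
	r := setup(t) // assumed helper returning a redis.UniversalClient
	defer r.Close()

	// Build a task message and seed it into the pending queue.
	msg := asynqtest.NewTaskMessageWithQueue("email:welcome", asynqtest.JSON(map[string]interface{}{"user_id": 42}), "default")
	asynqtest.SeedPendingQueue(t, r, []*base.TaskMessage{msg}, "default")

	// Read it back and assert on what was stored.
	got := asynqtest.GetPendingMessages(t, r, "default")
	if len(got) != 1 || got[0].Type != "email:welcome" {
		t.Errorf("unexpected pending messages: %v", got)
	}
}
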
@@ -7,59 +7,178 @@ package base
 import (
 	"context"
+	"crypto/md5"
+	"encoding/hex"
 	"fmt"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/go-redis/redis/v7"
-	"github.com/rs/xid"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/google/uuid"
+	"github.com/hibiken/asynq/internal/errors"
+	pb "github.com/hibiken/asynq/internal/proto"
+	"google.golang.org/protobuf/proto"
 )
 
+// Version of asynq library and CLI.
+const Version = "0.18.5"
+
 // DefaultQueueName is the queue name used if none are specified by user.
 const DefaultQueueName = "default"
 
-// Redis keys
+// DefaultQueue is the redis key for the default queue.
+var DefaultQueue = PendingKey(DefaultQueueName)
+
+// Global Redis keys.
 const (
 	AllServers    = "asynq:servers"    // ZSET
-	serversPrefix = "asynq:servers:"   // STRING - asynq:ps:<host>:<pid>:<serverid>
 	AllWorkers    = "asynq:workers"    // ZSET
-	workersPrefix   = "asynq:workers:"   // HASH - asynq:workers:<host:<pid>:<serverid>
-	processedPrefix = "asynq:processed:" // STRING - asynq:processed:<yyyy-mm-dd>
-	failurePrefix   = "asynq:failure:"   // STRING - asynq:failure:<yyyy-mm-dd>
-	QueuePrefix     = "asynq:queues:"    // LIST - asynq:queues:<qname>
+	AllSchedulers = "asynq:schedulers" // ZSET
 	AllQueues     = "asynq:queues"     // SET
-	DefaultQueue    = QueuePrefix + DefaultQueueName // LIST
-	ScheduledQueue  = "asynq:scheduled"               // ZSET
-	RetryQueue      = "asynq:retry"                   // ZSET
-	DeadQueue       = "asynq:dead"                    // ZSET
-	InProgressQueue = "asynq:in_progress"             // LIST
 	CancelChannel = "asynq:cancel"     // PubSub channel
 )
 
-// QueueKey returns a redis key for the given queue name.
-func QueueKey(qname string) string {
-	return QueuePrefix + strings.ToLower(qname)
+// TaskState denotes the state of a task.
+type TaskState int
+
+const (
+	TaskStateActive TaskState = iota + 1
+	TaskStatePending
+	TaskStateScheduled
+	TaskStateRetry
+	TaskStateArchived
+)
+
+func (s TaskState) String() string {
+	switch s {
+	case TaskStateActive:
+		return "active"
+	case TaskStatePending:
+		return "pending"
+	case TaskStateScheduled:
+		return "scheduled"
+	case TaskStateRetry:
+		return "retry"
+	case TaskStateArchived:
+		return "archived"
+	}
+	panic(fmt.Sprintf("internal error: unknown task state %d", s))
 }
 
-// ProcessedKey returns a redis key for processed count for the given day.
-func ProcessedKey(t time.Time) string {
-	return processedPrefix + t.UTC().Format("2006-01-02")
+func TaskStateFromString(s string) (TaskState, error) {
+	switch s {
+	case "active":
+		return TaskStateActive, nil
+	case "pending":
+		return TaskStatePending, nil
+	case "scheduled":
+		return TaskStateScheduled, nil
+	case "retry":
+		return TaskStateRetry, nil
+	case "archived":
+		return TaskStateArchived, nil
+	}
+	return 0, errors.E(errors.FailedPrecondition, fmt.Sprintf("%q is not supported task state", s))
 }
 
// FailureKey returns a redis key for failure count for the given day.
|
// ValidateQueueName validates a given qname to be used as a queue name.
|
||||||
func FailureKey(t time.Time) string {
|
// Returns nil if valid, otherwise returns non-nil error.
|
||||||
return failurePrefix + t.UTC().Format("2006-01-02")
|
func ValidateQueueName(qname string) error {
|
||||||
|
if len(strings.TrimSpace(qname)) == 0 {
|
||||||
|
return fmt.Errorf("queue name must contain one or more characters")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueueKeyPrefix returns a prefix for all keys in the given queue.
|
||||||
|
func QueueKeyPrefix(qname string) string {
|
||||||
|
return fmt.Sprintf("asynq:{%s}:", qname)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TaskKeyPrefix returns a prefix for task key.
|
||||||
|
func TaskKeyPrefix(qname string) string {
|
||||||
|
return fmt.Sprintf("%st:", QueueKeyPrefix(qname))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TaskKey returns a redis key for the given task message.
|
||||||
|
func TaskKey(qname, id string) string {
|
||||||
|
return fmt.Sprintf("%s%s", TaskKeyPrefix(qname), id)
|
||||||
|
}

// PendingKey returns a redis key for the given queue name.
func PendingKey(qname string) string {
	return fmt.Sprintf("%spending", QueueKeyPrefix(qname))
}

// ActiveKey returns a redis key for the active tasks.
func ActiveKey(qname string) string {
	return fmt.Sprintf("%sactive", QueueKeyPrefix(qname))
}

// ScheduledKey returns a redis key for the scheduled tasks.
func ScheduledKey(qname string) string {
	return fmt.Sprintf("%sscheduled", QueueKeyPrefix(qname))
}

// RetryKey returns a redis key for the retry tasks.
func RetryKey(qname string) string {
	return fmt.Sprintf("%sretry", QueueKeyPrefix(qname))
}

// ArchivedKey returns a redis key for the archived tasks.
func ArchivedKey(qname string) string {
	return fmt.Sprintf("%sarchived", QueueKeyPrefix(qname))
}

// DeadlinesKey returns a redis key for the deadlines.
func DeadlinesKey(qname string) string {
	return fmt.Sprintf("%sdeadlines", QueueKeyPrefix(qname))
}

// PausedKey returns a redis key to indicate that the given queue is paused.
func PausedKey(qname string) string {
	return fmt.Sprintf("%spaused", QueueKeyPrefix(qname))
}

// ProcessedKey returns a redis key for processed count for the given day for the queue.
func ProcessedKey(qname string, t time.Time) string {
	return fmt.Sprintf("%sprocessed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
}

// FailedKey returns a redis key for failure count for the given day for the queue.
func FailedKey(qname string, t time.Time) string {
	return fmt.Sprintf("%sfailed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
}
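As an illustration of how these helpers compose (a hypothetical snippet; the braces around the queue name act as a Redis Cluster hash tag, so all keys of one queue land in the same hash slot):

func exampleQueueKeys() {
	fmt.Println(PendingKey("default"))        // asynq:{default}:pending
	fmt.Println(TaskKey("default", "f1f2f3")) // asynq:{default}:t:f1f2f3
	day := time.Date(2020, 1, 6, 0, 0, 0, 0, time.UTC)
	fmt.Println(ProcessedKey("default", day)) // asynq:{default}:processed:2020-01-06
}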

// ServerInfoKey returns a redis key for process info.
func ServerInfoKey(hostname string, pid int, serverID string) string {
	return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, serverID)
}

// WorkersKey returns a redis key for the workers given hostname, pid, and server ID.
func WorkersKey(hostname string, pid int, serverID string) string {
	return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, serverID)
}

// SchedulerEntriesKey returns a redis key for the scheduler entries given scheduler ID.
func SchedulerEntriesKey(schedulerID string) string {
	return fmt.Sprintf("asynq:schedulers:{%s}", schedulerID)
}

// SchedulerHistoryKey returns a redis key for the scheduler's history for the given entry.
func SchedulerHistoryKey(entryID string) string {
	return fmt.Sprintf("asynq:scheduler_history:%s", entryID)
}

// UniqueKey returns a redis key with the given type, payload, and queue name.
func UniqueKey(qname, tasktype string, payload []byte) string {
	if payload == nil {
		return fmt.Sprintf("%sunique:%s:", QueueKeyPrefix(qname), tasktype)
	}
	checksum := md5.Sum(payload)
	return fmt.Sprintf("%sunique:%s:%s", QueueKeyPrefix(qname), tasktype, hex.EncodeToString(checksum[:]))
}
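A brief hypothetical sketch of UniqueKey in use: identical queue, type, and payload produce the same key because the payload is reduced to an MD5 checksum, while a nil payload yields a key with an empty checksum segment:

func exampleUniqueKey() {
	payload := []byte(`{"user_id":42}`)
	k1 := UniqueKey("default", "email:send", payload)
	k2 := UniqueKey("default", "email:send", []byte(`{"user_id":42}`))
	fmt.Println(k1 == k2)                             // true: same queue, type, and payload checksum
	fmt.Println(UniqueKey("default", "reindex", nil)) // asynq:{default}:unique:reindex:
}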

// TaskMessage is the internal representation of a task with additional metadata fields.
@@ -69,10 +188,10 @@ type TaskMessage struct {
	Type string

	// Payload holds data needed to process the task.
	Payload []byte

	// ID is a unique identifier for each task.
	ID uuid.UUID

	// Queue is a name this message should be enqueued to.
	Queue string
@@ -86,18 +205,26 @@ type TaskMessage struct {
	// ErrorMsg holds the error message from the last failure.
	ErrorMsg string

	// Time of last failure in Unix time,
	// the number of seconds elapsed since January 1, 1970 UTC.
	//
	// Use zero to indicate no last failure.
	LastFailedAt int64

	// Timeout specifies timeout in seconds.
	// If task processing doesn't complete within the timeout, the task will be retried
	// if retry count is remaining. Otherwise it will be moved to the archive.
	//
	// Use zero to indicate no timeout.
	Timeout int64

	// Deadline specifies the deadline for the task in Unix time,
	// the number of seconds elapsed since January 1, 1970 UTC.
	// If task processing doesn't complete before the deadline, the task will be retried
	// if retry count is remaining. Otherwise it will be moved to the archive.
	//
	// Use zero to indicate no deadline.
	Deadline int64

	// UniqueKey holds the redis key used for uniqueness lock for this task.
	//
@@ -105,157 +232,120 @@ type TaskMessage struct {
	UniqueKey string
}
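For orientation, a hedged example (all field values invented) of how a message might be populated now that Timeout is plain seconds and Deadline is Unix seconds, with zero meaning unset:

func exampleTaskMessage() *TaskMessage {
	return &TaskMessage{
		ID:       uuid.New(),
		Type:     "email:send",
		Payload:  []byte(`{"user_id":42}`),
		Queue:    "default",
		Retry:    25,
		Timeout:  1800,                                  // 30 minutes, expressed in seconds; zero means no timeout
		Deadline: time.Now().Add(24 * time.Hour).Unix(), // Unix seconds; zero means no deadline
	}
}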

// EncodeMessage marshals the given task message and returns an encoded bytes.
func EncodeMessage(msg *TaskMessage) ([]byte, error) {
	if msg == nil {
		return nil, fmt.Errorf("cannot encode nil message")
	}
	return proto.Marshal(&pb.TaskMessage{
		Type:         msg.Type,
		Payload:      msg.Payload,
		Id:           msg.ID.String(),
		Queue:        msg.Queue,
		Retry:        int32(msg.Retry),
		Retried:      int32(msg.Retried),
		ErrorMsg:     msg.ErrorMsg,
		LastFailedAt: msg.LastFailedAt,
		Timeout:      msg.Timeout,
		Deadline:     msg.Deadline,
		UniqueKey:    msg.UniqueKey,
	})
}

// DecodeMessage unmarshals the given bytes and returns a decoded task message.
func DecodeMessage(data []byte) (*TaskMessage, error) {
	var pbmsg pb.TaskMessage
	if err := proto.Unmarshal(data, &pbmsg); err != nil {
		return nil, err
	}
	return &TaskMessage{
		Type:         pbmsg.GetType(),
		Payload:      pbmsg.GetPayload(),
		ID:           uuid.MustParse(pbmsg.GetId()),
		Queue:        pbmsg.GetQueue(),
		Retry:        int(pbmsg.GetRetry()),
		Retried:      int(pbmsg.GetRetried()),
		ErrorMsg:     pbmsg.GetErrorMsg(),
		LastFailedAt: pbmsg.GetLastFailedAt(),
		Timeout:      pbmsg.GetTimeout(),
		Deadline:     pbmsg.GetDeadline(),
		UniqueKey:    pbmsg.GetUniqueKey(),
	}, nil
}

// TaskInfo describes a task message and its metadata.
type TaskInfo struct {
	Message       *TaskMessage
	State         TaskState
	NextProcessAt time.Time
}
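A minimal round-trip sketch (a hypothetical helper) tying the two functions above together:

func exampleMessageRoundTrip(msg *TaskMessage) (*TaskMessage, error) {
	data, err := EncodeMessage(msg) // protobuf-encoded bytes
	if err != nil {
		return nil, err
	}
	return DecodeMessage(data) // decodes back into an equivalent TaskMessage
}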

// Z represents sorted set member.
type Z struct {
	Message *TaskMessage
	Score   int64
}

// ServerState represents state of a server.
// ServerState methods are concurrency safe.
type ServerState struct {
	mu  sync.Mutex
	val ServerStateValue
}

// NewServerState returns a new state instance.
// Initial state is set to StateNew.
func NewServerState() *ServerState {
	return &ServerState{val: StateNew}
}

type ServerStateValue int

const (
	// StateNew represents a new server. Server begins in
	// this state and then transitions to StateActive when
	// Start or Run is called.
	StateNew ServerStateValue = iota

	// StateActive indicates the server is up and active.
	StateActive

	// StateStopped indicates the server is up but no longer processing new tasks.
	StateStopped

	// StateClosed indicates the server has been shutdown.
	StateClosed
)

var serverStates = []string{
	"new",
	"active",
	"stopped",
	"closed",
}

func (s *ServerState) String() string {
	s.mu.Lock()
	defer s.mu.Unlock()
	if StateNew <= s.val && s.val <= StateClosed {
		return serverStates[s.val]
	}
	return "unknown status"
}

// Get returns the status value.
func (s *ServerState) Get() ServerStateValue {
	s.mu.Lock()
	v := s.val
	s.mu.Unlock()
	return v
}

// Set sets the status value.
func (s *ServerState) Set(v ServerStateValue) {
	s.mu.Lock()
	s.val = v
	s.mu.Unlock()
}
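A short concurrency sketch (hypothetical, assuming it sits in the same package): Set, Get, and String all take the internal mutex, so concurrent callers are safe and running it under the -race flag should report nothing:

func exampleServerState() {
	state := NewServerState() // starts in StateNew
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); state.Set(StateActive) }() // e.g. a server starting up
	go func() { defer wg.Done(); _ = state.String() }()     // e.g. a heartbeater reading the state
	wg.Wait()
	fmt.Println(state.Get() == StateActive) // true once both goroutines are done
}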
|
||||||
|
|
||||||
// ServerInfo holds information about a running server.
|
// ServerInfo holds information about a running server.
|
||||||
@@ -271,18 +361,242 @@ type ServerInfo struct {
|
|||||||
ActiveWorkerCount int
|
ActiveWorkerCount int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EncodeServerInfo marshals the given ServerInfo and returns the encoded bytes.
|
||||||
|
func EncodeServerInfo(info *ServerInfo) ([]byte, error) {
|
||||||
|
if info == nil {
|
||||||
|
return nil, fmt.Errorf("cannot encode nil server info")
|
||||||
|
}
|
||||||
|
queues := make(map[string]int32)
|
||||||
|
for q, p := range info.Queues {
|
||||||
|
queues[q] = int32(p)
|
||||||
|
}
|
||||||
|
started, err := ptypes.TimestampProto(info.Started)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Marshal(&pb.ServerInfo{
|
||||||
|
Host: info.Host,
|
||||||
|
Pid: int32(info.PID),
|
||||||
|
ServerId: info.ServerID,
|
||||||
|
Concurrency: int32(info.Concurrency),
|
||||||
|
Queues: queues,
|
||||||
|
StrictPriority: info.StrictPriority,
|
||||||
|
Status: info.Status,
|
||||||
|
StartTime: started,
|
||||||
|
ActiveWorkerCount: int32(info.ActiveWorkerCount),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeServerInfo decodes the given bytes into ServerInfo.
|
||||||
|
func DecodeServerInfo(b []byte) (*ServerInfo, error) {
|
||||||
|
var pbmsg pb.ServerInfo
|
||||||
|
if err := proto.Unmarshal(b, &pbmsg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
queues := make(map[string]int)
|
||||||
|
for q, p := range pbmsg.GetQueues() {
|
||||||
|
queues[q] = int(p)
|
||||||
|
}
|
||||||
|
startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &ServerInfo{
|
||||||
|
Host: pbmsg.GetHost(),
|
||||||
|
PID: int(pbmsg.GetPid()),
|
||||||
|
ServerID: pbmsg.GetServerId(),
|
||||||
|
Concurrency: int(pbmsg.GetConcurrency()),
|
||||||
|
Queues: queues,
|
||||||
|
StrictPriority: pbmsg.GetStrictPriority(),
|
||||||
|
Status: pbmsg.GetStatus(),
|
||||||
|
Started: startTime,
|
||||||
|
ActiveWorkerCount: int(pbmsg.GetActiveWorkerCount()),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
// WorkerInfo holds information about a running worker.
|
// WorkerInfo holds information about a running worker.
|
||||||
type WorkerInfo struct {
|
type WorkerInfo struct {
|
||||||
Host string
|
Host string
|
||||||
PID int
|
PID int
|
||||||
ID xid.ID
|
ServerID string
|
||||||
|
ID string
|
||||||
Type string
|
Type string
|
||||||
|
Payload []byte
|
||||||
Queue string
|
Queue string
|
||||||
Payload map[string]interface{}
|
|
||||||
Started time.Time
|
Started time.Time
|
||||||
|
Deadline time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cancelations is a collection that holds cancel functions for all in-progress tasks.
|
// EncodeWorkerInfo marshals the given WorkerInfo and returns the encoded bytes.
|
||||||
|
func EncodeWorkerInfo(info *WorkerInfo) ([]byte, error) {
|
||||||
|
if info == nil {
|
||||||
|
return nil, fmt.Errorf("cannot encode nil worker info")
|
||||||
|
}
|
||||||
|
startTime, err := ptypes.TimestampProto(info.Started)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
deadline, err := ptypes.TimestampProto(info.Deadline)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Marshal(&pb.WorkerInfo{
|
||||||
|
Host: info.Host,
|
||||||
|
Pid: int32(info.PID),
|
||||||
|
ServerId: info.ServerID,
|
||||||
|
TaskId: info.ID,
|
||||||
|
TaskType: info.Type,
|
||||||
|
TaskPayload: info.Payload,
|
||||||
|
Queue: info.Queue,
|
||||||
|
StartTime: startTime,
|
||||||
|
Deadline: deadline,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeWorkerInfo decodes the given bytes into WorkerInfo.
|
||||||
|
func DecodeWorkerInfo(b []byte) (*WorkerInfo, error) {
|
||||||
|
var pbmsg pb.WorkerInfo
|
||||||
|
if err := proto.Unmarshal(b, &pbmsg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
deadline, err := ptypes.Timestamp(pbmsg.GetDeadline())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &WorkerInfo{
|
||||||
|
Host: pbmsg.GetHost(),
|
||||||
|
PID: int(pbmsg.GetPid()),
|
||||||
|
ServerID: pbmsg.GetServerId(),
|
||||||
|
ID: pbmsg.GetTaskId(),
|
||||||
|
Type: pbmsg.GetTaskType(),
|
||||||
|
Payload: pbmsg.GetTaskPayload(),
|
||||||
|
Queue: pbmsg.GetQueue(),
|
||||||
|
Started: startTime,
|
||||||
|
Deadline: deadline,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedulerEntry holds information about a periodic task registered with a scheduler.
|
||||||
|
type SchedulerEntry struct {
|
||||||
|
// Identifier of this entry.
|
||||||
|
ID string
|
||||||
|
|
||||||
|
// Spec describes the schedule of this entry.
|
||||||
|
Spec string
|
||||||
|
|
||||||
|
// Type is the task type of the periodic task.
|
||||||
|
Type string
|
||||||
|
|
||||||
|
// Payload is the payload of the periodic task.
|
||||||
|
Payload []byte
|
||||||
|
|
||||||
|
// Opts is the options for the periodic task.
|
||||||
|
Opts []string
|
||||||
|
|
||||||
|
// Next shows the next time the task will be enqueued.
|
||||||
|
Next time.Time
|
||||||
|
|
||||||
|
// Prev shows the last time the task was enqueued.
|
||||||
|
// Zero time if task was never enqueued.
|
||||||
|
Prev time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeSchedulerEntry marshals the given entry and returns an encoded bytes.
|
||||||
|
func EncodeSchedulerEntry(entry *SchedulerEntry) ([]byte, error) {
|
||||||
|
if entry == nil {
|
||||||
|
return nil, fmt.Errorf("cannot encode nil scheduler entry")
|
||||||
|
}
|
||||||
|
next, err := ptypes.TimestampProto(entry.Next)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
prev, err := ptypes.TimestampProto(entry.Prev)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Marshal(&pb.SchedulerEntry{
|
||||||
|
Id: entry.ID,
|
||||||
|
Spec: entry.Spec,
|
||||||
|
TaskType: entry.Type,
|
||||||
|
TaskPayload: entry.Payload,
|
||||||
|
EnqueueOptions: entry.Opts,
|
||||||
|
NextEnqueueTime: next,
|
||||||
|
PrevEnqueueTime: prev,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeSchedulerEntry unmarshals the given bytes and returns a decoded SchedulerEntry.
|
||||||
|
func DecodeSchedulerEntry(b []byte) (*SchedulerEntry, error) {
|
||||||
|
var pbmsg pb.SchedulerEntry
|
||||||
|
if err := proto.Unmarshal(b, &pbmsg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
next, err := ptypes.Timestamp(pbmsg.GetNextEnqueueTime())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
prev, err := ptypes.Timestamp(pbmsg.GetPrevEnqueueTime())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &SchedulerEntry{
|
||||||
|
ID: pbmsg.GetId(),
|
||||||
|
Spec: pbmsg.GetSpec(),
|
||||||
|
Type: pbmsg.GetTaskType(),
|
||||||
|
Payload: pbmsg.GetTaskPayload(),
|
||||||
|
Opts: pbmsg.GetEnqueueOptions(),
|
||||||
|
Next: next,
|
||||||
|
Prev: prev,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
|
||||||
|
type SchedulerEnqueueEvent struct {
|
||||||
|
// ID of the task that was enqueued.
|
||||||
|
TaskID string
|
||||||
|
|
||||||
|
// Time the task was enqueued.
|
||||||
|
EnqueuedAt time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeSchedulerEnqueueEvent marshals the given event
|
||||||
|
// and returns an encoded bytes.
|
||||||
|
func EncodeSchedulerEnqueueEvent(event *SchedulerEnqueueEvent) ([]byte, error) {
|
||||||
|
if event == nil {
|
||||||
|
return nil, fmt.Errorf("cannot encode nil enqueue event")
|
||||||
|
}
|
||||||
|
enqueuedAt, err := ptypes.TimestampProto(event.EnqueuedAt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Marshal(&pb.SchedulerEnqueueEvent{
|
||||||
|
TaskId: event.TaskID,
|
||||||
|
EnqueueTime: enqueuedAt,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeSchedulerEnqueueEvent unmarshals the given bytes
|
||||||
|
// and returns a decoded SchedulerEnqueueEvent.
|
||||||
|
func DecodeSchedulerEnqueueEvent(b []byte) (*SchedulerEnqueueEvent, error) {
|
||||||
|
var pbmsg pb.SchedulerEnqueueEvent
|
||||||
|
if err := proto.Unmarshal(b, &pbmsg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
enqueuedAt, err := ptypes.Timestamp(pbmsg.GetEnqueueTime())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &SchedulerEnqueueEvent{
|
||||||
|
TaskID: pbmsg.GetTaskId(),
|
||||||
|
EnqueuedAt: enqueuedAt,
|
||||||
|
}, nil
|
||||||
|
}

// Cancelations is a collection that holds cancel functions for all active tasks.
//
// Cancelations are safe for concurrent use by multiple goroutines.
type Cancelations struct {
@@ -319,34 +633,24 @@ func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
	return fn, ok
}

// Broker is a message broker that supports operations to manage task queues.
//
// See rdb.RDB as a reference implementation.
type Broker interface {
	Ping() error
	Enqueue(msg *TaskMessage) error
	EnqueueUnique(msg *TaskMessage, ttl time.Duration) error
	Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
	Done(msg *TaskMessage) error
	Requeue(msg *TaskMessage) error
	Schedule(msg *TaskMessage, processAt time.Time) error
	ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
	Retry(msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
	Archive(msg *TaskMessage, errMsg string) error
	ForwardIfReady(qnames ...string) error
	ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
	WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
	ClearServerState(host string, pid int, serverID string) error
	CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
	PublishCancelation(id string) error
	Close() error
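To show how the revised signatures fit together, here is a hedged sketch of a worker-style loop body (a hypothetical function, assuming it lives alongside the types above; the 30-second retry delay is made up):

func processOne(b Broker, handle func(*TaskMessage) error, qnames ...string) error {
	msg, deadline, err := b.Dequeue(qnames...)
	if err != nil {
		return err // e.g. no task is ready in any of the given queues
	}
	_ = deadline // a real worker would derive a context deadline from this value
	if herr := handle(msg); herr != nil {
		// Re-schedule the task and count this attempt as a failure.
		return b.Retry(msg, time.Now().Add(30*time.Second), herr.Error(), true)
	}
	return b.Done(msg)
}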
|||||||
@@ -6,62 +6,188 @@ package base
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"math/rand"
|
"crypto/md5"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
"github.com/google/go-cmp/cmp/cmpopts"
|
"github.com/google/uuid"
|
||||||
"github.com/rs/xid"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestTaskKey(t *testing.T) {
|
||||||
|
id := uuid.NewString()
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
qname string
|
||||||
|
id string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"default", id, fmt.Sprintf("asynq:{default}:t:%s", id)},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := TaskKey(tc.qname, tc.id)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("TaskKey(%q, %s) = %q, want %q", tc.qname, tc.id, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestQueueKey(t *testing.T) {
|
func TestQueueKey(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
qname string
|
qname string
|
||||||
want string
|
want string
|
||||||
}{
|
}{
|
||||||
{"custom", "asynq:queues:custom"},
|
{"default", "asynq:{default}:pending"},
|
||||||
|
{"custom", "asynq:{custom}:pending"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
got := QueueKey(tc.qname)
|
got := PendingKey(tc.qname)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("QueueKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
t.Errorf("QueueKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProcessedKey(t *testing.T) {
|
func TestActiveKey(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
input time.Time
|
qname string
|
||||||
want string
|
want string
|
||||||
}{
|
}{
|
||||||
{time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:processed:2019-11-14"},
|
{"default", "asynq:{default}:active"},
|
||||||
{time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:processed:2020-12-01"},
|
{"custom", "asynq:{custom}:active"},
|
||||||
{time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:processed:2020-01-06"},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
got := ProcessedKey(tc.input)
|
got := ActiveKey(tc.qname)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("ActiveKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeadlinesKey(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
qname string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"default", "asynq:{default}:deadlines"},
|
||||||
|
{"custom", "asynq:{custom}:deadlines"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := DeadlinesKey(tc.qname)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("DeadlinesKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestScheduledKey(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
qname string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"default", "asynq:{default}:scheduled"},
|
||||||
|
{"custom", "asynq:{custom}:scheduled"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := ScheduledKey(tc.qname)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("ScheduledKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRetryKey(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
qname string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"default", "asynq:{default}:retry"},
|
||||||
|
{"custom", "asynq:{custom}:retry"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := RetryKey(tc.qname)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("RetryKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestArchivedKey(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
qname string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"default", "asynq:{default}:archived"},
|
||||||
|
{"custom", "asynq:{custom}:archived"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := ArchivedKey(tc.qname)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("ArchivedKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPausedKey(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
qname string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"default", "asynq:{default}:paused"},
|
||||||
|
{"custom", "asynq:{custom}:paused"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := PausedKey(tc.qname)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("PausedKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProcessedKey(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
qname string
|
||||||
|
input time.Time
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"default", time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:{default}:processed:2019-11-14"},
|
||||||
|
{"critical", time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:{critical}:processed:2020-12-01"},
|
||||||
|
{"default", time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:{default}:processed:2020-01-06"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := ProcessedKey(tc.qname, tc.input)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("ProcessedKey(%v) = %q, want %q", tc.input, got, tc.want)
|
t.Errorf("ProcessedKey(%v) = %q, want %q", tc.input, got, tc.want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFailureKey(t *testing.T) {
|
func TestFailedKey(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
|
qname string
|
||||||
input time.Time
|
input time.Time
|
||||||
want string
|
want string
|
||||||
}{
|
}{
|
||||||
{time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:failure:2019-11-14"},
|
{"default", time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:{default}:failed:2019-11-14"},
|
||||||
{time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:failure:2020-12-01"},
|
{"custom", time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:{custom}:failed:2020-12-01"},
|
||||||
{time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:failure:2020-01-06"},
|
{"low", time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:{low}:failed:2020-01-06"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
got := FailureKey(tc.input)
|
got := FailedKey(tc.qname, tc.input)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("FailureKey(%v) = %q, want %q", tc.input, got, tc.want)
|
t.Errorf("FailureKey(%v) = %q, want %q", tc.input, got, tc.want)
|
||||||
}
|
}
|
||||||
@@ -75,8 +201,8 @@ func TestServerInfoKey(t *testing.T) {
|
|||||||
sid string
|
sid string
|
||||||
want string
|
want string
|
||||||
}{
|
}{
|
||||||
{"localhost", 9876, "server123", "asynq:servers:localhost:9876:server123"},
|
{"localhost", 9876, "server123", "asynq:servers:{localhost:9876:server123}"},
|
||||||
{"127.0.0.1", 1234, "server987", "asynq:servers:127.0.0.1:1234:server987"},
|
{"127.0.0.1", 1234, "server987", "asynq:servers:{127.0.0.1:1234:server987}"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
@@ -95,8 +221,8 @@ func TestWorkersKey(t *testing.T) {
|
|||||||
sid string
|
sid string
|
||||||
want string
|
want string
|
||||||
}{
|
}{
|
||||||
{"localhost", 9876, "server1", "asynq:workers:localhost:9876:server1"},
|
{"localhost", 9876, "server1", "asynq:workers:{localhost:9876:server1}"},
|
||||||
{"127.0.0.1", 1234, "server2", "asynq:workers:127.0.0.1:1234:server2"},
|
{"127.0.0.1", 1234, "server2", "asynq:workers:{127.0.0.1:1234:server2}"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
@@ -108,69 +234,324 @@ func TestWorkersKey(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test for server state being accessed by multiple goroutines.
|
func TestSchedulerEntriesKey(t *testing.T) {
|
||||||
// Run with -race flag to check for data race.
|
tests := []struct {
|
||||||
func TestServerStateConcurrentAccess(t *testing.T) {
|
schedulerID string
|
||||||
ss := NewServerState("127.0.0.1", 1234, 10, map[string]int{"default": 1}, false)
|
want string
|
||||||
var wg sync.WaitGroup
|
}{
|
||||||
started := time.Now()
|
{"localhost:9876:scheduler123", "asynq:schedulers:{localhost:9876:scheduler123}"},
|
||||||
msgs := []*TaskMessage{
|
{"127.0.0.1:1234:scheduler987", "asynq:schedulers:{127.0.0.1:1234:scheduler987}"},
|
||||||
{ID: xid.New(), Type: "type1", Payload: map[string]interface{}{"user_id": 42}},
|
|
||||||
{ID: xid.New(), Type: "type2"},
|
|
||||||
{ID: xid.New(), Type: "type3"},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Simulate hearbeater calling SetStatus and SetStarted.
|
for _, tc := range tests {
|
||||||
|
got := SchedulerEntriesKey(tc.schedulerID)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("SchedulerEntriesKey(%q) = %q, want %q", tc.schedulerID, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSchedulerHistoryKey(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
entryID string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"entry876", "asynq:scheduler_history:entry876"},
|
||||||
|
{"entry345", "asynq:scheduler_history:entry345"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := SchedulerHistoryKey(tc.entryID)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("SchedulerHistoryKey(%q) = %q, want %q",
|
||||||
|
tc.entryID, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toBytes(m map[string]interface{}) []byte {
|
||||||
|
b, err := json.Marshal(m)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUniqueKey(t *testing.T) {
|
||||||
|
payload1 := toBytes(map[string]interface{}{"a": 123, "b": "hello", "c": true})
|
||||||
|
payload2 := toBytes(map[string]interface{}{"b": "hello", "c": true, "a": 123})
|
||||||
|
payload3 := toBytes(map[string]interface{}{
|
||||||
|
"address": map[string]string{"line": "123 Main St", "city": "Boston", "state": "MA"},
|
||||||
|
"names": []string{"bob", "mike", "rob"}})
|
||||||
|
payload4 := toBytes(map[string]interface{}{
|
||||||
|
"time": time.Date(2020, time.July, 28, 0, 0, 0, 0, time.UTC),
|
||||||
|
"duration": time.Hour})
|
||||||
|
|
||||||
|
checksum := func(data []byte) string {
|
||||||
|
sum := md5.Sum(data)
|
||||||
|
return hex.EncodeToString(sum[:])
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
desc string
|
||||||
|
qname string
|
||||||
|
tasktype string
|
||||||
|
payload []byte
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"with primitive types",
|
||||||
|
"default",
|
||||||
|
"email:send",
|
||||||
|
payload1,
|
||||||
|
fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload1)),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"with unsorted keys",
|
||||||
|
"default",
|
||||||
|
"email:send",
|
||||||
|
payload2,
|
||||||
|
fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload2)),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"with composite types",
|
||||||
|
"default",
|
||||||
|
"email:send",
|
||||||
|
payload3,
|
||||||
|
fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload3)),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"with complex types",
|
||||||
|
"default",
|
||||||
|
"email:send",
|
||||||
|
payload4,
|
||||||
|
fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload4)),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"with nil payload",
|
||||||
|
"default",
|
||||||
|
"reindex",
|
||||||
|
nil,
|
||||||
|
"asynq:{default}:unique:reindex:",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := UniqueKey(tc.qname, tc.tasktype, tc.payload)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("%s: UniqueKey(%q, %q, %v) = %q, want %q", tc.desc, tc.qname, tc.tasktype, tc.payload, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMessageEncoding(t *testing.T) {
|
||||||
|
id := uuid.New()
|
||||||
|
tests := []struct {
|
||||||
|
in *TaskMessage
|
||||||
|
out *TaskMessage
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
in: &TaskMessage{
|
||||||
|
Type: "task1",
|
||||||
|
Payload: toBytes(map[string]interface{}{"a": 1, "b": "hello!", "c": true}),
|
||||||
|
ID: id,
|
||||||
|
Queue: "default",
|
||||||
|
Retry: 10,
|
||||||
|
Retried: 0,
|
||||||
|
Timeout: 1800,
|
||||||
|
Deadline: 1692311100,
|
||||||
|
},
|
||||||
|
out: &TaskMessage{
|
||||||
|
Type: "task1",
|
||||||
|
Payload: toBytes(map[string]interface{}{"a": json.Number("1"), "b": "hello!", "c": true}),
|
||||||
|
ID: id,
|
||||||
|
Queue: "default",
|
||||||
|
Retry: 10,
|
||||||
|
Retried: 0,
|
||||||
|
Timeout: 1800,
|
||||||
|
Deadline: 1692311100,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
encoded, err := EncodeMessage(tc.in)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("EncodeMessage(msg) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
decoded, err := DecodeMessage(encoded)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("DecodeMessage(encoded) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(tc.out, decoded); diff != "" {
|
||||||
|
t.Errorf("Decoded message == %+v, want %+v;(-want,+got)\n%s",
|
||||||
|
decoded, tc.out, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestServerInfoEncoding(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
info ServerInfo
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
info: ServerInfo{
|
||||||
|
Host: "127.0.0.1",
|
||||||
|
PID: 9876,
|
||||||
|
ServerID: "abc123",
|
||||||
|
Concurrency: 10,
|
||||||
|
Queues: map[string]int{"default": 1, "critical": 2},
|
||||||
|
StrictPriority: false,
|
||||||
|
Status: "active",
|
||||||
|
Started: time.Now().Add(-3 * time.Hour),
|
||||||
|
ActiveWorkerCount: 8,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
encoded, err := EncodeServerInfo(&tc.info)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("EncodeServerInfo(info) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
decoded, err := DecodeServerInfo(encoded)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("DecodeServerInfo(encoded) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(&tc.info, decoded); diff != "" {
|
||||||
|
t.Errorf("Decoded ServerInfo == %+v, want %+v;(-want,+got)\n%s",
|
||||||
|
decoded, tc.info, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWorkerInfoEncoding(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
info WorkerInfo
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
info: WorkerInfo{
|
||||||
|
Host: "127.0.0.1",
|
||||||
|
PID: 9876,
|
||||||
|
ServerID: "abc123",
|
||||||
|
ID: uuid.NewString(),
|
||||||
|
Type: "taskA",
|
||||||
|
Payload: toBytes(map[string]interface{}{"foo": "bar"}),
|
||||||
|
Queue: "default",
|
||||||
|
Started: time.Now().Add(-3 * time.Hour),
|
||||||
|
Deadline: time.Now().Add(30 * time.Second),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
encoded, err := EncodeWorkerInfo(&tc.info)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("EncodeWorkerInfo(info) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
decoded, err := DecodeWorkerInfo(encoded)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("DecodeWorkerInfo(encoded) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(&tc.info, decoded); diff != "" {
|
||||||
|
t.Errorf("Decoded WorkerInfo == %+v, want %+v;(-want,+got)\n%s",
|
||||||
|
decoded, tc.info, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSchedulerEntryEncoding(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
entry SchedulerEntry
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
entry: SchedulerEntry{
|
||||||
|
ID: uuid.NewString(),
|
||||||
|
Spec: "* * * * *",
|
||||||
|
Type: "task_A",
|
||||||
|
Payload: toBytes(map[string]interface{}{"foo": "bar"}),
|
||||||
|
Opts: []string{"Queue('email')"},
|
||||||
|
Next: time.Now().Add(30 * time.Second).UTC(),
|
||||||
|
Prev: time.Now().Add(-2 * time.Minute).UTC(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
encoded, err := EncodeSchedulerEntry(&tc.entry)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("EncodeSchedulerEntry(entry) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
decoded, err := DecodeSchedulerEntry(encoded)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("DecodeSchedulerEntry(encoded) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(&tc.entry, decoded); diff != "" {
|
||||||
|
t.Errorf("Decoded SchedulerEntry == %+v, want %+v;(-want,+got)\n%s",
|
||||||
|
decoded, tc.entry, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSchedulerEnqueueEventEncoding(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
event SchedulerEnqueueEvent
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
event: SchedulerEnqueueEvent{
|
||||||
|
TaskID: uuid.NewString(),
|
||||||
|
EnqueuedAt: time.Now().Add(-30 * time.Second).UTC(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
encoded, err := EncodeSchedulerEnqueueEvent(&tc.event)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("EncodeSchedulerEnqueueEvent(event) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
decoded, err := DecodeSchedulerEnqueueEvent(encoded)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("DecodeSchedulerEnqueueEvent(encoded) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(&tc.event, decoded); diff != "" {
|
||||||
|
t.Errorf("Decoded SchedulerEnqueueEvent == %+v, want %+v;(-want,+got)\n%s",
|
||||||
|
decoded, tc.event, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test for status being accessed by multiple goroutines.
|
||||||
|
// Run with -race flag to check for data race.
|
||||||
|
func TestStatusConcurrentAccess(t *testing.T) {
|
||||||
|
status := NewServerState()
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
ss.SetStarted(started)
|
status.Get()
|
||||||
ss.SetStatus(StatusRunning)
|
_ = status.String()
|
||||||
if status := ss.Status(); status != StatusRunning {
|
|
||||||
t.Errorf("(*ServerState).Status() = %v, want %v", status, StatusRunning)
|
|
||||||
}
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Simulate processor starting worker goroutines.
|
|
||||||
for _, msg := range msgs {
|
|
||||||
wg.Add(1)
|
|
||||||
ss.AddWorkerStats(msg, time.Now())
|
|
||||||
go func(msg *TaskMessage) {
|
|
||||||
defer wg.Done()
|
|
||||||
time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
|
|
||||||
ss.DeleteWorkerStats(msg)
|
|
||||||
}(msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Simulate hearbeater calling Get and GetWorkers
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
wg.Done()
|
defer wg.Done()
|
||||||
for i := 0; i < 5; i++ {
|
status.Set(StateClosed)
|
||||||
ss.GetInfo()
|
_ = status.String()
|
||||||
ss.GetWorkers()
|
|
||||||
time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
|
|
||||||
}
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
|
||||||
want := &ServerInfo{
|
|
||||||
Host: "127.0.0.1",
|
|
||||||
PID: 1234,
|
|
||||||
Concurrency: 10,
|
|
||||||
Queues: map[string]int{"default": 1},
|
|
||||||
StrictPriority: false,
|
|
||||||
Status: "running",
|
|
||||||
Started: started,
|
|
||||||
ActiveWorkerCount: 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
got := ss.GetInfo()
|
|
||||||
if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(ServerInfo{}, "ServerID")); diff != "" {
|
|
||||||
t.Errorf("(*ServerState).GetInfo() = %+v, want %+v; (-want,+got)\n%s",
|
|
||||||
got, want, diff)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test for cancelations being accessed by multiple goroutines.
|
// Test for cancelations being accessed by multiple goroutines.
|
||||||
@@ -216,9 +597,4 @@ func TestCancelationsConcurrentAccess(t *testing.T) {
|
|||||||
if ok {
|
if ok {
|
||||||
t.Errorf("(*Cancelations).Get(%q) = _, true, want <nil>, false", key2)
|
t.Errorf("(*Cancelations).Get(%q) = _, true, want <nil>, false", key2)
|
||||||
}
|
}
|
||||||
|
|
||||||
funcs := c.GetAll()
|
|
||||||
if len(funcs) != 2 {
|
|
||||||
t.Errorf("(*Cancelations).GetAll() returns %d functions, want 2", len(funcs))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
285
internal/errors/errors.go
Normal file
285
internal/errors/errors.go
Normal file
@@ -0,0 +1,285 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package errors defines the error type and functions used by
|
||||||
|
// asynq and its internal packages.
|
||||||
|
package errors
|
||||||
|
|
||||||
|
// Note: This package is inspired by a blog post about error handling in project Upspin
|
||||||
|
// https://commandcenter.blogspot.com/2017/12/error-handling-in-upspin.html.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error is the type that implements the error interface.
|
||||||
|
// It contains a number of fields, each of different type.
|
||||||
|
// An Error value may leave some values unset.
|
||||||
|
type Error struct {
|
||||||
|
Code Code
|
||||||
|
Op Op
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) DebugString() string {
|
||||||
|
var b strings.Builder
|
||||||
|
if e.Op != "" {
|
||||||
|
b.WriteString(string(e.Op))
|
||||||
|
}
|
||||||
|
if e.Code != Unspecified {
|
||||||
|
if b.Len() > 0 {
|
||||||
|
b.WriteString(": ")
|
||||||
|
}
|
||||||
|
b.WriteString(e.Code.String())
|
||||||
|
}
|
||||||
|
if e.Err != nil {
|
||||||
|
if b.Len() > 0 {
|
||||||
|
b.WriteString(": ")
|
||||||
|
}
|
||||||
|
b.WriteString(e.Err.Error())
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
var b strings.Builder
|
||||||
|
if e.Code != Unspecified {
|
||||||
|
b.WriteString(e.Code.String())
|
||||||
|
}
|
||||||
|
if e.Err != nil {
|
||||||
|
if b.Len() > 0 {
|
||||||
|
b.WriteString(": ")
|
||||||
|
}
|
||||||
|
b.WriteString(e.Err.Error())
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Unwrap() error {
|
||||||
|
return e.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code defines the canonical error code.
|
||||||
|
type Code uint8
|
||||||
|
|
||||||
|
// List of canonical error codes.
|
||||||
|
const (
|
||||||
|
Unspecified Code = iota
|
||||||
|
NotFound
|
||||||
|
FailedPrecondition
|
||||||
|
Internal
|
||||||
|
AlreadyExists
|
||||||
|
Unknown
|
||||||
|
// Note: If you add a new value here, make sure to update String method.
|
||||||
|
)
|
||||||
|
|
||||||
|
func (c Code) String() string {
|
||||||
|
switch c {
|
||||||
|
case Unspecified:
|
||||||
|
return "ERROR_CODE_UNSPECIFIED"
|
||||||
|
case NotFound:
|
||||||
|
return "NOT_FOUND"
|
||||||
|
case FailedPrecondition:
|
||||||
|
return "FAILED_PRECONDITION"
|
||||||
|
case Internal:
|
||||||
|
return "INTERNAL_ERROR"
|
||||||
|
case AlreadyExists:
|
||||||
|
return "ALREADY_EXISTS"
|
||||||
|
case Unknown:
|
||||||
|
return "UNKNOWN"
|
||||||
|
}
|
||||||
|
panic(fmt.Sprintf("unknown error code %d", c))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Op describes an operation, usually as the package and method,
|
||||||
|
// such as "rdb.Enqueue".
|
||||||
|
type Op string
|
||||||
|
|
||||||
|
// E builds an error value from its arguments.
|
||||||
|
// There must be at least one argument or E panics.
|
||||||
|
// The type of each argument determines its meaning.
|
||||||
|
// If more than one argument of a given type is presented,
|
||||||
|
// only the last one is recorded.
|
||||||
|
//
|
||||||
|
// The types are:
|
||||||
|
// errors.Op
|
||||||
|
// The operation being performed, usually the method
|
||||||
|
// being invoked (Get, Put, etc.).
|
||||||
|
// errors.Code
|
||||||
|
// The canonical error code, such as NOT_FOUND.
|
||||||
|
// string
|
||||||
|
// Treated as an error message and assigned to the
|
||||||
|
// Err field after a call to errors.New.
|
||||||
|
// error
|
||||||
|
// The underlying error that triggered this one.
|
||||||
|
//
|
||||||
|
// If the error is printed, only those items that have been
|
||||||
|
// set to non-zero values will appear in the result.
|
||||||
|
func E(args ...interface{}) error {
|
||||||
|
if len(args) == 0 {
|
||||||
|
panic("call to errors.E with no arguments")
|
||||||
|
}
|
||||||
|
e := &Error{}
|
||||||
|
for _, arg := range args {
|
||||||
|
switch arg := arg.(type) {
|
||||||
|
case Op:
|
||||||
|
e.Op = arg
|
||||||
|
case Code:
|
||||||
|
e.Code = arg
|
||||||
|
case error:
|
||||||
|
e.Err = arg
|
||||||
|
case string:
|
||||||
|
e.Err = errors.New(arg)
|
||||||
|
default:
|
||||||
|
_, file, line, _ := runtime.Caller(1)
|
||||||
|
log.Printf("errors.E: bad call from %s:%d: %v", file, line, args)
|
||||||
|
return fmt.Errorf("unknown type %T, value %v in error call", arg, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// CanonicalCode returns the canonical code of the given error if one is present.
|
||||||
|
// Otherwise it returns Unspecified.
|
||||||
|
func CanonicalCode(err error) Code {
|
||||||
|
if err == nil {
|
||||||
|
return Unspecified
|
||||||
|
}
|
||||||
|
e, ok := err.(*Error)
|
||||||
|
if !ok {
|
||||||
|
return Unspecified
|
||||||
|
}
|
||||||
|
if e.Code == Unspecified {
|
||||||
|
return CanonicalCode(e.Err)
|
||||||
|
}
|
||||||
|
return e.Code
|
||||||
|
}
|
||||||
|
|
||||||
|
/******************************************
|
||||||
|
Domin Specific Error Types & Values
|
||||||
|
*******************************************/
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNoProcessableTask indicates that there are no tasks ready to be processed.
|
||||||
|
ErrNoProcessableTask = errors.New("no tasks are ready for processing")
|
||||||
|
|
||||||
|
// ErrDuplicateTask indicates that another task with the same unique key holds the uniqueness lock.
|
||||||
|
ErrDuplicateTask = errors.New("task already exists")
|
||||||
|
)
|
||||||
|
|
||||||
|
// TaskNotFoundError indicates that a task with the given ID does not exist
// in the given queue.
type TaskNotFoundError struct {
	Queue string // queue name
	ID    string // task id
}

func (e *TaskNotFoundError) Error() string {
	return fmt.Sprintf("cannot find task with id=%s in queue %q", e.ID, e.Queue)
}

// IsTaskNotFound reports whether any error in err's chain is of type TaskNotFoundError.
func IsTaskNotFound(err error) bool {
	var target *TaskNotFoundError
	return As(err, &target)
}

// QueueNotFoundError indicates that a queue with the given name does not exist.
type QueueNotFoundError struct {
	Queue string // queue name
}

func (e *QueueNotFoundError) Error() string {
	return fmt.Sprintf("queue %q does not exist", e.Queue)
}

// IsQueueNotFound reports whether any error in err's chain is of type QueueNotFoundError.
func IsQueueNotFound(err error) bool {
	var target *QueueNotFoundError
	return As(err, &target)
}

// QueueNotEmptyError indicates that the given queue is not empty.
type QueueNotEmptyError struct {
	Queue string // queue name
}

func (e *QueueNotEmptyError) Error() string {
	return fmt.Sprintf("queue %q is not empty", e.Queue)
}

// IsQueueNotEmpty reports whether any error in err's chain is of type QueueNotEmptyError.
func IsQueueNotEmpty(err error) bool {
	var target *QueueNotEmptyError
	return As(err, &target)
}

// TaskAlreadyArchivedError indicates that the task in question is already archived.
type TaskAlreadyArchivedError struct {
	Queue string // queue name
	ID    string // task id
}

func (e *TaskAlreadyArchivedError) Error() string {
	return fmt.Sprintf("task is already archived: id=%s, queue=%s", e.ID, e.Queue)
}

// IsTaskAlreadyArchived reports whether any error in err's chain is of type TaskAlreadyArchivedError.
func IsTaskAlreadyArchived(err error) bool {
	var target *TaskAlreadyArchivedError
	return As(err, &target)
}

// RedisCommandError indicates that the given redis command returned an error.
type RedisCommandError struct {
	Command string // redis command (e.g. LRANGE, ZADD, etc.)
	Err     error  // underlying error
}

func (e *RedisCommandError) Error() string {
	return fmt.Sprintf("redis command error: %s failed: %v", strings.ToUpper(e.Command), e.Err)
}

func (e *RedisCommandError) Unwrap() error { return e.Err }

// IsRedisCommandError reports whether any error in err's chain is of type RedisCommandError.
func IsRedisCommandError(err error) bool {
	var target *RedisCommandError
	return As(err, &target)
}

/*************************************************
    Standard Library errors package functions
*************************************************/

// New returns an error that formats as the given text.
// Each call to New returns a distinct error value even if the text is identical.
//
// This function is the errors.New function from the standard library (https://golang.org/pkg/errors/#New).
// It is exported from this package for import convenience.
func New(text string) error { return errors.New(text) }

// Is reports whether any error in err's chain matches target.
//
// This function is the errors.Is function from the standard library (https://golang.org/pkg/errors/#Is).
// It is exported from this package for import convenience.
func Is(err, target error) bool { return errors.Is(err, target) }

// As finds the first error in err's chain that matches target, and if so, sets target to that error value and returns true.
// Otherwise, it returns false.
//
// This function is the errors.As function from the standard library (https://golang.org/pkg/errors/#As).
// It is exported from this package for import convenience.
func As(err error, target interface{}) bool { return errors.As(err, target) }

// Unwrap returns the result of calling the Unwrap method on err, if err's type contains an Unwrap method returning error.
// Otherwise, Unwrap returns nil.
//
// This function is the errors.Unwrap function from the standard library (https://golang.org/pkg/errors/#Unwrap).
// It is exported from this package for import convenience.
func Unwrap(err error) error { return errors.Unwrap(err) }
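As a rough usage sketch of the helpers above (written as if inside this package, and assuming the variadic E constructor, the Op type, and the NotFound code defined earlier in this file; deleteTask and handleDelete are hypothetical names, not part of the change), a storage-layer call site might wrap and inspect these errors like this:

func deleteTask(qname, id string) error {
	op := Op("rdb.DeleteTask")
	// ... look up the task in redis (omitted in this sketch) ...
	found := false
	if !found {
		// Attach the operation, a canonical code, and a typed error.
		return E(op, NotFound, &TaskNotFoundError{Queue: qname, ID: id})
	}
	return nil
}

func handleDelete(qname, id string) {
	if err := deleteTask(qname, id); err != nil {
		switch {
		case IsTaskNotFound(err):
			// Treat a missing task as a no-op.
		case IsQueueNotFound(err):
			// Surface a configuration problem to the caller.
		default:
			// Fall back to the canonical code for everything else.
			_ = CanonicalCode(err)
		}
	}
}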
176
internal/errors/errors_test.go
Normal file
@@ -0,0 +1,176 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package errors

import "testing"

func TestErrorDebugString(t *testing.T) {
	// DebugString should include Op since it's meant to be used by
	// maintainers/contributors of the asynq package.
	tests := []struct {
		desc string
		err  error
		want string
	}{
		{
			desc: "With Op, Code, and string",
			err:  E(Op("rdb.DeleteTask"), NotFound, "cannot find task with id=123"),
			want: "rdb.DeleteTask: NOT_FOUND: cannot find task with id=123",
		},
		{
			desc: "With Op, Code and error",
			err:  E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}),
			want: `rdb.DeleteTask: NOT_FOUND: cannot find task with id=123 in queue "default"`,
		},
	}

	for _, tc := range tests {
		if got := tc.err.(*Error).DebugString(); got != tc.want {
			t.Errorf("%s: got=%q, want=%q", tc.desc, got, tc.want)
		}
	}
}

func TestErrorString(t *testing.T) {
	// The Error method should omit Op since op is an internal detail
	// and we don't want to expose it to users of the package.
	tests := []struct {
		desc string
		err  error
		want string
	}{
		{
			desc: "With Op, Code, and string",
			err:  E(Op("rdb.DeleteTask"), NotFound, "cannot find task with id=123"),
			want: "NOT_FOUND: cannot find task with id=123",
		},
		{
			desc: "With Op, Code and error",
			err:  E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}),
			want: `NOT_FOUND: cannot find task with id=123 in queue "default"`,
		},
	}

	for _, tc := range tests {
		if got := tc.err.Error(); got != tc.want {
			t.Errorf("%s: got=%q, want=%q", tc.desc, got, tc.want)
		}
	}
}

func TestErrorIs(t *testing.T) {
	var ErrCustom = New("custom sentinel error")

	tests := []struct {
		desc   string
		err    error
		target error
		want   bool
	}{
		{
			desc:   "should unwrap one level",
			err:    E(Op("rdb.DeleteTask"), ErrCustom),
			target: ErrCustom,
			want:   true,
		},
	}

	for _, tc := range tests {
		if got := Is(tc.err, tc.target); got != tc.want {
			t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want)
		}
	}
}

func TestErrorAs(t *testing.T) {
	tests := []struct {
		desc   string
		err    error
		target interface{}
		want   bool
	}{
		{
			desc:   "should unwrap one level",
			err:    E(Op("rdb.DeleteTask"), NotFound, &QueueNotFoundError{Queue: "email"}),
			target: &QueueNotFoundError{},
			want:   true,
		},
	}

	for _, tc := range tests {
		if got := As(tc.err, &tc.target); got != tc.want {
			t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want)
		}
	}
}

func TestErrorPredicates(t *testing.T) {
	tests := []struct {
		desc string
		fn   func(err error) bool
		err  error
		want bool
	}{
		{
			desc: "IsTaskNotFound should detect presence of TaskNotFoundError in err's chain",
			fn:   IsTaskNotFound,
			err:  E(Op("rdb.ArchiveTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "9876"}),
			want: true,
		},
		{
			desc: "IsTaskNotFound should detect absence of TaskNotFoundError in err's chain",
			fn:   IsTaskNotFound,
			err:  E(Op("rdb.ArchiveTask"), NotFound, &QueueNotFoundError{Queue: "default"}),
			want: false,
		},
		{
			desc: "IsQueueNotFound should detect presence of QueueNotFoundError in err's chain",
			fn:   IsQueueNotFound,
			err:  E(Op("rdb.ArchiveTask"), NotFound, &QueueNotFoundError{Queue: "default"}),
			want: true,
		},
	}

	for _, tc := range tests {
		if got := tc.fn(tc.err); got != tc.want {
			t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want)
		}
	}
}

func TestCanonicalCode(t *testing.T) {
	tests := []struct {
		desc string
		err  error
		want Code
	}{
		{
			desc: "without nesting",
			err:  E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}),
			want: NotFound,
		},
		{
			desc: "with nesting",
			err:  E(FailedPrecondition, E(NotFound)),
			want: FailedPrecondition,
		},
		{
			desc: "returns Unspecified if err is not *Error",
			err:  New("some other error"),
			want: Unspecified,
		},
		{
			desc: "returns Unspecified if err is nil",
			err:  nil,
			want: Unspecified,
		},
	}

	for _, tc := range tests {
		if got := CanonicalCode(tc.err); got != tc.want {
			t.Errorf("%s: got=%s, want=%s", tc.desc, got, tc.want)
		}
	}
}
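The predicate test above covers only IsTaskNotFound and IsQueueNotFound; the remaining helpers in errors.go follow the same errors.As-based pattern. A sketch of additional table entries one might append to the same test (illustrative only, not part of this change; the rdb.RemoveQueue op name and the choice of FailedPrecondition are assumptions):

		{
			desc: "IsQueueNotEmpty should detect presence of QueueNotEmptyError in err's chain",
			fn:   IsQueueNotEmpty,
			err:  E(Op("rdb.RemoveQueue"), FailedPrecondition, &QueueNotEmptyError{Queue: "default"}),
			want: true,
		},
		{
			desc: "IsTaskAlreadyArchived should detect presence of TaskAlreadyArchivedError in err's chain",
			fn:   IsTaskAlreadyArchived,
			err:  E(Op("rdb.ArchiveTask"), FailedPrecondition, &TaskAlreadyArchivedError{Queue: "default", ID: "123"}),
			want: true,
		},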
812
internal/proto/asynq.pb.go
Normal file
@@ -0,0 +1,812 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// protoc-gen-go v1.25.0
|
||||||
|
// protoc v3.14.0
|
||||||
|
// source: asynq.proto
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
reflect "reflect"
|
||||||
|
sync "sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Verify that this generated code is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||||
|
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||||
|
// of the legacy proto package is being used.
|
||||||
|
const _ = proto.ProtoPackageIsVersion4
|
||||||
|
|
||||||
|
// TaskMessage is the internal representation of a task with additional
|
||||||
|
// metadata fields.
|
||||||
|
type TaskMessage struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// Type indicates the kind of the task to be performed.
|
||||||
|
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||||
|
// Payload holds data needed to process the task.
|
||||||
|
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||||
|
// Unique identifier for the task.
|
||||||
|
Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"`
|
||||||
|
// Name of the queue to which this task belongs.
|
||||||
|
Queue string `protobuf:"bytes,4,opt,name=queue,proto3" json:"queue,omitempty"`
|
||||||
|
// Max number of retries for this task.
|
||||||
|
Retry int32 `protobuf:"varint,5,opt,name=retry,proto3" json:"retry,omitempty"`
|
||||||
|
// Number of times this task has been retried so far.
|
||||||
|
Retried int32 `protobuf:"varint,6,opt,name=retried,proto3" json:"retried,omitempty"`
|
||||||
|
// Error message from the last failure.
|
||||||
|
ErrorMsg string `protobuf:"bytes,7,opt,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"`
|
||||||
|
// Time of last failure in Unix time,
|
||||||
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
|
// Use zero to indicate no last failure.
|
||||||
|
LastFailedAt int64 `protobuf:"varint,11,opt,name=last_failed_at,json=lastFailedAt,proto3" json:"last_failed_at,omitempty"`
|
||||||
|
// Timeout specifies timeout in seconds.
|
||||||
|
// Use zero to indicate no timeout.
|
||||||
|
Timeout int64 `protobuf:"varint,8,opt,name=timeout,proto3" json:"timeout,omitempty"`
|
||||||
|
// Deadline specifies the deadline for the task in Unix time,
|
||||||
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
|
// Use zero to indicate no deadline.
|
||||||
|
Deadline int64 `protobuf:"varint,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
|
||||||
|
// UniqueKey holds the redis key used for uniqueness lock for this task.
|
||||||
|
// Empty string indicates that no uniqueness lock was used.
|
||||||
|
UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) Reset() {
|
||||||
|
*x = TaskMessage{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[0]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*TaskMessage) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *TaskMessage) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[0]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use TaskMessage.ProtoReflect.Descriptor instead.
|
||||||
|
func (*TaskMessage) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetType() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Type
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetPayload() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.Payload
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Id
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetQueue() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Queue
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetRetry() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Retry
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetRetried() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Retried
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetErrorMsg() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.ErrorMsg
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetLastFailedAt() int64 {
|
||||||
|
if x != nil {
|
||||||
|
return x.LastFailedAt
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetTimeout() int64 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Timeout
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetDeadline() int64 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Deadline
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetUniqueKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.UniqueKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerInfo holds information about a running server.
|
||||||
|
type ServerInfo struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// Host machine the server is running on.
|
||||||
|
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
|
||||||
|
// PID of the server process.
|
||||||
|
Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||||
|
// Unique identifier for this server.
|
||||||
|
ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
|
||||||
|
// Maximum number of tasks this server can process concurrently.
|
||||||
|
Concurrency int32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
|
||||||
|
// List of queue names with their priorities.
|
||||||
|
// The server will consume tasks from the queues and prioritize
|
||||||
|
// queues with higher priority numbers.
|
||||||
|
Queues map[string]int32 `protobuf:"bytes,5,rep,name=queues,proto3" json:"queues,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
|
||||||
|
// If set, the server will always consume tasks from a queue with higher
|
||||||
|
// priority.
|
||||||
|
StrictPriority bool `protobuf:"varint,6,opt,name=strict_priority,json=strictPriority,proto3" json:"strict_priority,omitempty"`
|
||||||
|
// Status indicates the status of the server.
|
||||||
|
Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
|
||||||
|
// Time this server was started.
|
||||||
|
StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
|
||||||
|
// Number of workers currently processing tasks.
|
||||||
|
ActiveWorkerCount int32 `protobuf:"varint,9,opt,name=active_worker_count,json=activeWorkerCount,proto3" json:"active_worker_count,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) Reset() {
|
||||||
|
*x = ServerInfo{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[1]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*ServerInfo) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *ServerInfo) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[1]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use ServerInfo.ProtoReflect.Descriptor instead.
|
||||||
|
func (*ServerInfo) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetHost() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Host
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetPid() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Pid
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetServerId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.ServerId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetConcurrency() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Concurrency
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetQueues() map[string]int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Queues
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetStrictPriority() bool {
|
||||||
|
if x != nil {
|
||||||
|
return x.StrictPriority
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetStatus() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Status
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetStartTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.StartTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetActiveWorkerCount() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.ActiveWorkerCount
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// WorkerInfo holds information about a running worker.
|
||||||
|
type WorkerInfo struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// Host machine this worker is running on.
|
||||||
|
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
|
||||||
|
// PID of the process in which this worker is running.
|
||||||
|
Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||||
|
// ID of the server in which this worker is running.
|
||||||
|
ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
|
||||||
|
// ID of the task this worker is processing.
|
||||||
|
TaskId string `protobuf:"bytes,4,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
|
||||||
|
// Type of the task this worker is processing.
|
||||||
|
TaskType string `protobuf:"bytes,5,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
|
||||||
|
// Payload of the task this worker is processing.
|
||||||
|
TaskPayload []byte `protobuf:"bytes,6,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
|
||||||
|
// Name of the queue to which the task the worker is processing belongs.
|
||||||
|
Queue string `protobuf:"bytes,7,opt,name=queue,proto3" json:"queue,omitempty"`
|
||||||
|
// Time this worker started processing the task.
|
||||||
|
StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
|
||||||
|
// Deadline by which the worker needs to complete processing
|
||||||
|
// the task. If worker exceeds the deadline, the task will fail.
|
||||||
|
Deadline *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) Reset() {
|
||||||
|
*x = WorkerInfo{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[2]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*WorkerInfo) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[2]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use WorkerInfo.ProtoReflect.Descriptor instead.
|
||||||
|
func (*WorkerInfo) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{2}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetHost() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Host
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetPid() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Pid
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetServerId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.ServerId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetTaskId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetTaskType() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskType
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetTaskPayload() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskPayload
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetQueue() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Queue
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetStartTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.StartTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetDeadline() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.Deadline
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedulerEntry holds information about a periodic task registered
|
||||||
|
// with a scheduler.
|
||||||
|
type SchedulerEntry struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// Identifier of the scheduler entry.
|
||||||
|
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||||
|
// Periodic schedule spec of the entry.
|
||||||
|
Spec string `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"`
|
||||||
|
// Task type of the periodic task.
|
||||||
|
TaskType string `protobuf:"bytes,3,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
|
||||||
|
// Task payload of the periodic task.
|
||||||
|
TaskPayload []byte `protobuf:"bytes,4,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
|
||||||
|
// Options used to enqueue the periodic task.
|
||||||
|
EnqueueOptions []string `protobuf:"bytes,5,rep,name=enqueue_options,json=enqueueOptions,proto3" json:"enqueue_options,omitempty"`
|
||||||
|
// Next time the task will be enqueued.
|
||||||
|
NextEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=next_enqueue_time,json=nextEnqueueTime,proto3" json:"next_enqueue_time,omitempty"`
|
||||||
|
// Last time the task was enqueued.
|
||||||
|
// Zero time if task was never enqueued.
|
||||||
|
PrevEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=prev_enqueue_time,json=prevEnqueueTime,proto3" json:"prev_enqueue_time,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) Reset() {
|
||||||
|
*x = SchedulerEntry{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[3]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*SchedulerEntry) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[3]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use SchedulerEntry.ProtoReflect.Descriptor instead.
|
||||||
|
func (*SchedulerEntry) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{3}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Id
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetSpec() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Spec
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetTaskType() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskType
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetTaskPayload() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskPayload
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetEnqueueOptions() []string {
|
||||||
|
if x != nil {
|
||||||
|
return x.EnqueueOptions
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetNextEnqueueTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.NextEnqueueTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetPrevEnqueueTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.PrevEnqueueTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedulerEnqueueEvent holds information about an enqueue event
|
||||||
|
// by a scheduler.
|
||||||
|
type SchedulerEnqueueEvent struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// ID of the task that was enqueued.
|
||||||
|
TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
|
||||||
|
// Time the task was enqueued.
|
||||||
|
EnqueueTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=enqueue_time,json=enqueueTime,proto3" json:"enqueue_time,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) Reset() {
|
||||||
|
*x = SchedulerEnqueueEvent{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[4]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*SchedulerEnqueueEvent) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[4]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use SchedulerEnqueueEvent.ProtoReflect.Descriptor instead.
|
||||||
|
func (*SchedulerEnqueueEvent) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{4}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) GetTaskId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) GetEnqueueTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.EnqueueTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var File_asynq_proto protoreflect.FileDescriptor
|
||||||
|
|
||||||
|
var file_asynq_proto_rawDesc = []byte{
|
||||||
|
0x0a, 0x0b, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x61,
|
||||||
|
0x73, 0x79, 0x6e, 0x71, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
|
||||||
|
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
|
||||||
|
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa9, 0x02, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65,
|
||||||
|
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
|
||||||
|
0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79,
|
||||||
|
0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c,
|
||||||
|
0x6f, 0x61, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||||
|
0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
|
||||||
|
0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x74,
|
||||||
|
0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x72, 0x65, 0x74, 0x72, 0x79, 0x12,
|
||||||
|
0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05,
|
||||||
|
0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72,
|
||||||
|
0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72,
|
||||||
|
0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66,
|
||||||
|
0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c,
|
||||||
|
0x6c, 0x61, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07,
|
||||||
|
0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74,
|
||||||
|
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69,
|
||||||
|
0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69,
|
||||||
|
0x6e, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79,
|
||||||
|
0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65,
|
||||||
|
0x79, 0x22, 0x8f, 0x03, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
|
||||||
|
0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
|
||||||
|
0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||||
|
0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
|
||||||
|
0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65,
|
||||||
|
0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
|
||||||
|
0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72,
|
||||||
|
0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x35, 0x0a, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x18,
|
||||||
|
0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x53, 0x65,
|
||||||
|
0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x45,
|
||||||
|
0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f,
|
||||||
|
0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18,
|
||||||
|
0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x50, 0x72, 0x69,
|
||||||
|
0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
|
||||||
|
0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a,
|
||||||
|
0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
|
||||||
|
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||||
|
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73,
|
||||||
|
0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69,
|
||||||
|
0x76, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
|
||||||
|
0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x57, 0x6f, 0x72,
|
||||||
|
0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x75,
|
||||||
|
0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||||
|
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
|
||||||
|
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
|
||||||
|
0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e,
|
||||||
|
0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||||
|
0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20,
|
||||||
|
0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76,
|
||||||
|
0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72,
|
||||||
|
0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64,
|
||||||
|
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1b,
|
||||||
|
0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
|
||||||
|
0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74,
|
||||||
|
0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28,
|
||||||
|
0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x14,
|
||||||
|
0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71,
|
||||||
|
0x75, 0x65, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69,
|
||||||
|
0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||||
|
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
|
||||||
|
0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12,
|
||||||
|
0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
|
||||||
|
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||||
|
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64,
|
||||||
|
0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0e, 0x53, 0x63, 0x68, 0x65,
|
||||||
|
0x64, 0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
|
||||||
|
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x70,
|
||||||
|
0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x1b,
|
||||||
|
0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||||
|
0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74,
|
||||||
|
0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||||
|
0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x27,
|
||||||
|
0x0a, 0x0f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65,
|
||||||
|
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x11, 0x6e, 0x65, 0x78, 0x74, 0x5f,
|
||||||
|
0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01,
|
||||||
|
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||||
|
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f,
|
||||||
|
0x6e, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12,
|
||||||
|
0x46, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f,
|
||||||
|
0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||||
|
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||||
|
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x76, 0x45, 0x6e, 0x71, 0x75,
|
||||||
|
0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x6f, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65, 0x64,
|
||||||
|
0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74,
|
||||||
|
0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||||
|
0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x6e, 0x71,
|
||||||
|
0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||||
|
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||||
|
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x65, 0x6e, 0x71,
|
||||||
|
0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68,
|
||||||
|
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x69, 0x62, 0x69, 0x6b, 0x65, 0x6e, 0x2f, 0x61,
|
||||||
|
0x73, 0x79, 0x6e, 0x71, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72,
|
||||||
|
0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
file_asynq_proto_rawDescOnce sync.Once
|
||||||
|
file_asynq_proto_rawDescData = file_asynq_proto_rawDesc
|
||||||
|
)
|
||||||
|
|
||||||
|
func file_asynq_proto_rawDescGZIP() []byte {
|
||||||
|
file_asynq_proto_rawDescOnce.Do(func() {
|
||||||
|
file_asynq_proto_rawDescData = protoimpl.X.CompressGZIP(file_asynq_proto_rawDescData)
|
||||||
|
})
|
||||||
|
return file_asynq_proto_rawDescData
|
||||||
|
}
|
||||||
|
|
||||||
|
var file_asynq_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||||
|
var file_asynq_proto_goTypes = []interface{}{
|
||||||
|
(*TaskMessage)(nil), // 0: asynq.TaskMessage
|
||||||
|
(*ServerInfo)(nil), // 1: asynq.ServerInfo
|
||||||
|
(*WorkerInfo)(nil), // 2: asynq.WorkerInfo
|
||||||
|
(*SchedulerEntry)(nil), // 3: asynq.SchedulerEntry
|
||||||
|
(*SchedulerEnqueueEvent)(nil), // 4: asynq.SchedulerEnqueueEvent
|
||||||
|
nil, // 5: asynq.ServerInfo.QueuesEntry
|
||||||
|
(*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
|
||||||
|
}
|
||||||
|
var file_asynq_proto_depIdxs = []int32{
|
||||||
|
5, // 0: asynq.ServerInfo.queues:type_name -> asynq.ServerInfo.QueuesEntry
|
||||||
|
6, // 1: asynq.ServerInfo.start_time:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 2: asynq.WorkerInfo.start_time:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 3: asynq.WorkerInfo.deadline:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 4: asynq.SchedulerEntry.next_enqueue_time:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 5: asynq.SchedulerEntry.prev_enqueue_time:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 6: asynq.SchedulerEnqueueEvent.enqueue_time:type_name -> google.protobuf.Timestamp
|
||||||
|
7, // [7:7] is the sub-list for method output_type
|
||||||
|
7, // [7:7] is the sub-list for method input_type
|
||||||
|
7, // [7:7] is the sub-list for extension type_name
|
||||||
|
7, // [7:7] is the sub-list for extension extendee
|
||||||
|
0, // [0:7] is the sub-list for field type_name
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { file_asynq_proto_init() }
|
||||||
|
func file_asynq_proto_init() {
|
||||||
|
if File_asynq_proto != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !protoimpl.UnsafeEnabled {
|
||||||
|
file_asynq_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*TaskMessage); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_asynq_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*ServerInfo); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_asynq_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*WorkerInfo); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_asynq_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*SchedulerEntry); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_asynq_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*SchedulerEnqueueEvent); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
type x struct{}
|
||||||
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
|
RawDescriptor: file_asynq_proto_rawDesc,
|
||||||
|
NumEnums: 0,
|
||||||
|
NumMessages: 6,
|
||||||
|
NumExtensions: 0,
|
||||||
|
NumServices: 0,
|
||||||
|
},
|
||||||
|
GoTypes: file_asynq_proto_goTypes,
|
||||||
|
DependencyIndexes: file_asynq_proto_depIdxs,
|
||||||
|
MessageInfos: file_asynq_proto_msgTypes,
|
||||||
|
}.Build()
|
||||||
|
File_asynq_proto = out.File
|
||||||
|
file_asynq_proto_rawDesc = nil
|
||||||
|
file_asynq_proto_goTypes = nil
|
||||||
|
file_asynq_proto_depIdxs = nil
|
||||||
|
}
|
||||||
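The generated messages above satisfy the proto.Message interface, so they can be serialized with the proto package this file imports. A minimal round-trip sketch, assuming the package is imported as pb via the go_package path declared in asynq.proto; all field values here are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "github.com/hibiken/asynq/internal/proto"
)

func main() {
	msg := &pb.TaskMessage{
		Type:    "email:welcome",        // illustrative task type
		Payload: []byte(`{"user": 42}`), // illustrative payload
		Id:      "d1c7a6f5",             // illustrative id
		Queue:   "default",
		Retry:   25,
		Timeout: 1800,
	}

	// Encode to the protobuf wire format and decode it back.
	data, err := proto.Marshal(msg)
	if err != nil {
		log.Fatal(err)
	}
	var decoded pb.TaskMessage
	if err := proto.Unmarshal(data, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetType(), decoded.GetQueue())
}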
154
internal/proto/asynq.proto
Normal file
@@ -0,0 +1,154 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

syntax = "proto3";
package asynq;

import "google/protobuf/timestamp.proto";

option go_package = "github.com/hibiken/asynq/internal/proto";

// TaskMessage is the internal representation of a task with additional
// metadata fields.
message TaskMessage {
  // Type indicates the kind of the task to be performed.
  string type = 1;

  // Payload holds data needed to process the task.
  bytes payload = 2;

  // Unique identifier for the task.
  string id = 3;

  // Name of the queue to which this task belongs.
  string queue = 4;

  // Max number of retries for this task.
  int32 retry = 5;

  // Number of times this task has been retried so far.
  int32 retried = 6;

  // Error message from the last failure.
  string error_msg = 7;

  // Time of last failure in Unix time,
  // the number of seconds elapsed since January 1, 1970 UTC.
  // Use zero to indicate no last failure.
  int64 last_failed_at = 11;

  // Timeout specifies timeout in seconds.
  // Use zero to indicate no timeout.
  int64 timeout = 8;

  // Deadline specifies the deadline for the task in Unix time,
  // the number of seconds elapsed since January 1, 1970 UTC.
  // Use zero to indicate no deadline.
  int64 deadline = 9;

  // UniqueKey holds the redis key used for uniqueness lock for this task.
  // Empty string indicates that no uniqueness lock was used.
  string unique_key = 10;
};

// ServerInfo holds information about a running server.
message ServerInfo {
  // Host machine the server is running on.
  string host = 1;

  // PID of the server process.
  int32 pid = 2;

  // Unique identifier for this server.
  string server_id = 3;

  // Maximum number of tasks this server can process concurrently.
  int32 concurrency = 4;

  // List of queue names with their priorities.
  // The server will consume tasks from the queues and prioritize
  // queues with higher priority numbers.
  map<string, int32> queues = 5;

  // If set, the server will always consume tasks from a queue with higher
  // priority.
  bool strict_priority = 6;

  // Status indicates the status of the server.
  string status = 7;

  // Time this server was started.
  google.protobuf.Timestamp start_time = 8;

  // Number of workers currently processing tasks.
  int32 active_worker_count = 9;
};

// WorkerInfo holds information about a running worker.
message WorkerInfo {
  // Host machine this worker is running on.
  string host = 1;

  // PID of the process in which this worker is running.
  int32 pid = 2;

  // ID of the server in which this worker is running.
  string server_id = 3;

  // ID of the task this worker is processing.
  string task_id = 4;

  // Type of the task this worker is processing.
  string task_type = 5;

  // Payload of the task this worker is processing.
  bytes task_payload = 6;

  // Name of the queue to which the task the worker is processing belongs.
  string queue = 7;

  // Time this worker started processing the task.
  google.protobuf.Timestamp start_time = 8;

  // Deadline by which the worker needs to complete processing
  // the task. If worker exceeds the deadline, the task will fail.
  google.protobuf.Timestamp deadline = 9;
};

// SchedulerEntry holds information about a periodic task registered
// with a scheduler.
message SchedulerEntry {
  // Identifier of the scheduler entry.
  string id = 1;

  // Periodic schedule spec of the entry.
  string spec = 2;

  // Task type of the periodic task.
  string task_type = 3;

  // Task payload of the periodic task.
  bytes task_payload = 4;

  // Options used to enqueue the periodic task.
  repeated string enqueue_options = 5;

  // Next time the task will be enqueued.
  google.protobuf.Timestamp next_enqueue_time = 6;

  // Last time the task was enqueued.
  // Zero time if task was never enqueued.
  google.protobuf.Timestamp prev_enqueue_time = 7;
};

// SchedulerEnqueueEvent holds information about an enqueue event
// by a scheduler.
message SchedulerEnqueueEvent {
  // ID of the task that was enqueued.
  string task_id = 1;

  // Time the task was enqueued.
  google.protobuf.Timestamp enqueue_time = 2;
};
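The schema above maps one-to-one onto the generated Go structs in asynq.pb.go; the google.protobuf.Timestamp fields come through as *timestamppb.Timestamp rather than time.Time. A small sketch of populating a ServerInfo and converting the timestamp back, assuming the pb import alias and purely illustrative field values:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"

	pb "github.com/hibiken/asynq/internal/proto"
)

func main() {
	// Wall-clock fields are carried as google.protobuf.Timestamp values.
	info := &pb.ServerInfo{
		Host:        "example-host", // illustrative values throughout
		Pid:         1234,
		ServerId:    "srv-1",
		Concurrency: 10,
		Queues:      map[string]int32{"critical": 6, "default": 3, "low": 1},
		Status:      "running",
		StartTime:   timestamppb.New(time.Now()),
	}

	// Convert back to time.Time for display or comparison.
	started := info.GetStartTime().AsTime()
	fmt.Println(info.GetHost(), started.Format(time.RFC3339))
}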
@@ -5,37 +5,262 @@
 package rdb
 
 import (
+	"fmt"
 	"testing"
+	"time"
 
-	"github.com/go-redis/redis/v7"
-	h "github.com/hibiken/asynq/internal/asynqtest"
+	"github.com/hibiken/asynq/internal/asynqtest"
 	"github.com/hibiken/asynq/internal/base"
 )
 
-func BenchmarkDone(b *testing.B) {
-	r := redis.NewClient(&redis.Options{
-		Addr: "localhost:6379",
-		DB:   8,
-	})
-	h.FlushDB(b, r)
-
-	// populate in-progress queue with messages
-	var inProgress []*base.TaskMessage
-	for i := 0; i < 40; i++ {
-		inProgress = append(inProgress,
-			h.NewTaskMessage("send_email", map[string]interface{}{"subject": "hello", "recipient_id": 123}))
-	}
-	h.SeedInProgressQueue(b, r, inProgress)
-
-	rdb := NewRDB(r)
+func BenchmarkEnqueue(b *testing.B) {
+	r := setup(b)
+	msg := asynqtest.NewTaskMessage("task1", nil)
 
 	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+
+	for i := 0; i < b.N; i++ {
 		b.StopTimer()
-		msg := h.NewTaskMessage("reindex", map[string]interface{}{"config": "path/to/config/file"})
-		r.LPush(base.InProgressQueue, h.MustMarshal(b, msg))
+		asynqtest.FlushDB(b, r.client)
 		b.StartTimer()
 
-		rdb.Done(msg)
+		if err := r.Enqueue(msg); err != nil {
+			b.Fatalf("Enqueue failed: %v", err)
+		}
 	}
 }
+
+func BenchmarkEnqueueUnique(b *testing.B) {
+	r := setup(b)
+	msg := &base.TaskMessage{
+		Type:      "task1",
+		Payload:   nil,
+		Queue:     base.DefaultQueueName,
+		UniqueKey: base.UniqueKey("default", "task1", nil),
+	}
+	uniqueTTL := 5 * time.Minute
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		b.StartTimer()
+
+		if err := r.EnqueueUnique(msg, uniqueTTL); err != nil {
+			b.Fatalf("EnqueueUnique failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkSchedule(b *testing.B) {
+	r := setup(b)
+	msg := asynqtest.NewTaskMessage("task1", nil)
+	processAt := time.Now().Add(3 * time.Minute)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		b.StartTimer()
+
+		if err := r.Schedule(msg, processAt); err != nil {
+			b.Fatalf("Schedule failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkScheduleUnique(b *testing.B) {
+	r := setup(b)
+	msg := &base.TaskMessage{
+		Type:      "task1",
+		Payload:   nil,
+		Queue:     base.DefaultQueueName,
+		UniqueKey: base.UniqueKey("default", "task1", nil),
+	}
+	processAt := time.Now().Add(3 * time.Minute)
+	uniqueTTL := 5 * time.Minute
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		b.StartTimer()
+
+		if err := r.ScheduleUnique(msg, processAt, uniqueTTL); err != nil {
+			b.Fatalf("EnqueueUnique failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkDequeueSingleQueue(b *testing.B) {
+	r := setup(b)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		for i := 0; i < 10; i++ {
+			m := asynqtest.NewTaskMessageWithQueue(
+				fmt.Sprintf("task%d", i), nil, base.DefaultQueueName)
+			if err := r.Enqueue(m); err != nil {
+				b.Fatalf("Enqueue failed: %v", err)
+			}
+		}
+		b.StartTimer()
+
+		if _, _, err := r.Dequeue(base.DefaultQueueName); err != nil {
+			b.Fatalf("Dequeue failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkDequeueMultipleQueues(b *testing.B) {
+	qnames := []string{"critical", "default", "low"}
+	r := setup(b)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		for i := 0; i < 10; i++ {
+			for _, qname := range qnames {
+				m := asynqtest.NewTaskMessageWithQueue(
+					fmt.Sprintf("%s_task%d", qname, i), nil, qname)
+				if err := r.Enqueue(m); err != nil {
+					b.Fatalf("Enqueue failed: %v", err)
+				}
+			}
+		}
+		b.StartTimer()
+
+		if _, _, err := r.Dequeue(qnames...); err != nil {
+			b.Fatalf("Dequeue failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkDone(b *testing.B) {
+	r := setup(b)
+	m1 := asynqtest.NewTaskMessage("task1", nil)
+	m2 := asynqtest.NewTaskMessage("task2", nil)
+	m3 := asynqtest.NewTaskMessage("task3", nil)
+	msgs := []*base.TaskMessage{m1, m2, m3}
+	zs := []base.Z{
+		{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
+		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
+		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
+	}
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
+		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		b.StartTimer()
+
+		if err := r.Done(msgs[0]); err != nil {
+			b.Fatalf("Done failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkRetry(b *testing.B) {
+	r := setup(b)
+	m1 := asynqtest.NewTaskMessage("task1", nil)
+	m2 := asynqtest.NewTaskMessage("task2", nil)
+	m3 := asynqtest.NewTaskMessage("task3", nil)
+	msgs := []*base.TaskMessage{m1, m2, m3}
+	zs := []base.Z{
+		{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
+		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
+		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
+	}
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
+		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		b.StartTimer()
+
+		if err := r.Retry(msgs[0], time.Now().Add(1*time.Minute), "error", true /*isFailure*/); err != nil {
+			b.Fatalf("Retry failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkArchive(b *testing.B) {
+	r := setup(b)
+	m1 := asynqtest.NewTaskMessage("task1", nil)
+	m2 := asynqtest.NewTaskMessage("task2", nil)
+	m3 := asynqtest.NewTaskMessage("task3", nil)
+	msgs := []*base.TaskMessage{m1, m2, m3}
+	zs := []base.Z{
+		{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
+		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
+		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
+	}
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
+		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		b.StartTimer()
+
+		if err := r.Archive(msgs[0], "error"); err != nil {
+			b.Fatalf("Archive failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkRequeue(b *testing.B) {
+	r := setup(b)
+	m1 := asynqtest.NewTaskMessage("task1", nil)
+	m2 := asynqtest.NewTaskMessage("task2", nil)
+	m3 := asynqtest.NewTaskMessage("task3", nil)
+	msgs := []*base.TaskMessage{m1, m2, m3}
+	zs := []base.Z{
+		{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
+		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
+		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
+	}
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
+		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		b.StartTimer()
+
+		if err := r.Requeue(msgs[0]); err != nil {
+			b.Fatalf("Requeue failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkCheckAndEnqueue(b *testing.B) {
+	r := setup(b)
+	now := time.Now()
+	var zs []base.Z
+	for i := -100; i < 100; i++ {
+		msg := asynqtest.NewTaskMessage(fmt.Sprintf("task%d", i), nil)
|
||||||
|
score := now.Add(time.Duration(i) * time.Second).Unix()
|
||||||
|
zs = append(zs, base.Z{Message: msg, Score: score})
|
||||||
|
}
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
b.StopTimer()
|
||||||
|
asynqtest.FlushDB(b, r.client)
|
||||||
|
asynqtest.SeedScheduledQueue(b, r.client, zs, base.DefaultQueueName)
|
||||||
|
b.StartTimer()
|
||||||
|
|
||||||
|
if err := r.ForwardIfReady(base.DefaultQueueName); err != nil {
|
||||||
|
b.Fatalf("ForwardIfReady failed: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
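All of the benchmarks above lean on a setup helper that hands back an RDB bound to a throwaway Redis database; the helper itself is outside this hunk. The sketch below shows what such a helper could look like; the address, database number, and flush-on-setup behavior are assumptions for illustration, not the repository's exact helper.

package rdb

import (
	"testing"

	"github.com/go-redis/redis/v7"
)

// setup is a sketch of the helper the benchmarks rely on: it returns an RDB
// backed by a scratch Redis database and clears that database before each run.
func setup(tb testing.TB) *RDB {
	tb.Helper()
	client := redis.NewClient(&redis.Options{
		Addr: "localhost:6379", // assumed local Redis used only for benchmarks
		DB:   14,               // assumed scratch database number
	})
	// Start from a clean slate so one benchmark cannot skew another.
	if err := client.FlushDB().Err(); err != nil {
		tb.Fatalf("FlushDB failed: %v", err)
	}
	return NewRDB(client)
}

With a local Redis running, something like go test -run='^$' -bench=. ./internal/rdb/... would exercise the functions above against that instance.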
4 file diffs suppressed because they are too large
@@ -26,6 +26,9 @@ type TestBroker struct {
 	real base.Broker
 }
 
+// Make sure TestBroker implements Broker interface at compile time.
+var _ base.Broker = (*TestBroker)(nil)
+
 func NewTestBroker(b base.Broker) *TestBroker {
 	return &TestBroker{real: b}
 }
@@ -60,11 +63,11 @@ func (tb *TestBroker) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
 	return tb.real.EnqueueUnique(msg, ttl)
 }
 
-func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, error) {
+func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, error) {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
-		return nil, errRedisDown
+		return nil, time.Time{}, errRedisDown
 	}
 	return tb.real.Dequeue(qnames...)
 }
@@ -105,58 +108,58 @@ func (tb *TestBroker) ScheduleUnique(msg *base.TaskMessage, processAt time.Time,
 	return tb.real.ScheduleUnique(msg, processAt, ttl)
 }
 
-func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
+func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Retry(msg, processAt, errMsg)
+	return tb.real.Retry(msg, processAt, errMsg, isFailure)
 }
 
-func (tb *TestBroker) Kill(msg *base.TaskMessage, errMsg string) error {
+func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Kill(msg, errMsg)
+	return tb.real.Archive(msg, errMsg)
 }
 
-func (tb *TestBroker) RequeueAll() (int64, error) {
-	tb.mu.Lock()
-	defer tb.mu.Unlock()
-	if tb.sleeping {
-		return 0, errRedisDown
-	}
-	return tb.real.RequeueAll()
-}
-
-func (tb *TestBroker) CheckAndEnqueue(qnames ...string) error {
+func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.CheckAndEnqueue()
+	return tb.real.ForwardIfReady(qnames...)
 }
 
-func (tb *TestBroker) WriteServerState(ss *base.ServerState, ttl time.Duration) error {
+func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return nil, errRedisDown
+	}
+	return tb.real.ListDeadlineExceeded(deadline, qnames...)
+}
+
+func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.WriteServerState(ss, ttl)
+	return tb.real.WriteServerState(info, workers, ttl)
 }
 
-func (tb *TestBroker) ClearServerState(ss *base.ServerState) error {
+func (tb *TestBroker) ClearServerState(host string, pid int, serverID string) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.ClearServerState(ss)
+	return tb.real.ClearServerState(host, pid, serverID)
 }
 
 func (tb *TestBroker) CancelationPubSub() (*redis.PubSub, error) {
@@ -177,6 +180,15 @@ func (tb *TestBroker) PublishCancelation(id string) error {
 	return tb.real.PublishCancelation(id)
 }
 
+func (tb *TestBroker) Ping() error {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return errRedisDown
+	}
+	return tb.real.Ping()
+}
+
 func (tb *TestBroker) Close() error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
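TestBroker wraps a real broker so tests can simulate Redis going down and coming back. A minimal sketch of how it might be used, assuming the wrapper exposes Sleep and Wakeup toggles that flip the sleeping flag seen above (those helper names and the local Redis settings are assumptions, not part of this diff):

package asynq

import (
	"testing"

	"github.com/go-redis/redis/v7"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
	"github.com/hibiken/asynq/internal/testbroker"
)

// TestBrokerOutage sketches the intended use of the wrapper: while "asleep",
// every delegated call fails with errRedisDown; Wakeup restores delegation.
func TestBrokerOutage(t *testing.T) {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 15}) // assumed test instance
	broker := testbroker.NewTestBroker(rdb.NewRDB(client))

	// Simulate a Redis outage.
	broker.Sleep()
	if _, _, err := broker.Dequeue(base.DefaultQueueName); err == nil {
		t.Error("Dequeue should fail while the broker is asleep")
	}

	// Bring the broker back; calls are delegated to the real RDB again.
	broker.Wakeup()
	if err := broker.Ping(); err != nil {
		t.Errorf("Ping after Wakeup failed: %v", err)
	}
}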
166	payload.go
@@ -1,166 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

// Payload holds arbitrary data needed for task execution.
type Payload struct {
	data map[string]interface{}
}

type errKeyNotFound struct {
	key string
}

func (e *errKeyNotFound) Error() string {
	return fmt.Sprintf("key %q does not exist", e.key)
}

// Has reports whether key exists.
func (p Payload) Has(key string) bool {
	_, ok := p.data[key]
	return ok
}

// GetString returns a string value if a string type is associated with
// the key, otherwise reports an error.
func (p Payload) GetString(key string) (string, error) {
	v, ok := p.data[key]
	if !ok {
		return "", &errKeyNotFound{key}
	}
	return cast.ToStringE(v)
}

// GetInt returns an int value if a numeric type is associated with
// the key, otherwise reports an error.
func (p Payload) GetInt(key string) (int, error) {
	v, ok := p.data[key]
	if !ok {
		return 0, &errKeyNotFound{key}
	}
	return cast.ToIntE(v)
}

// GetFloat64 returns a float64 value if a numeric type is associated with
// the key, otherwise reports an error.
func (p Payload) GetFloat64(key string) (float64, error) {
	v, ok := p.data[key]
	if !ok {
		return 0, &errKeyNotFound{key}
	}
	return cast.ToFloat64E(v)
}

// GetBool returns a boolean value if a boolean type is associated with
// the key, otherwise reports an error.
func (p Payload) GetBool(key string) (bool, error) {
	v, ok := p.data[key]
	if !ok {
		return false, &errKeyNotFound{key}
	}
	return cast.ToBoolE(v)
}

// GetStringSlice returns a slice of strings if a string slice type is associated with
// the key, otherwise reports an error.
func (p Payload) GetStringSlice(key string) ([]string, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringSliceE(v)
}

// GetIntSlice returns a slice of ints if a int slice type is associated with
// the key, otherwise reports an error.
func (p Payload) GetIntSlice(key string) ([]int, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToIntSliceE(v)
}

// GetStringMap returns a map of string to empty interface
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMap(key string) (map[string]interface{}, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapE(v)
}

// GetStringMapString returns a map of string to string
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapString(key string) (map[string]string, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapStringE(v)
}

// GetStringMapStringSlice returns a map of string to string slice
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapStringSlice(key string) (map[string][]string, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapStringSliceE(v)
}

// GetStringMapInt returns a map of string to int
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapInt(key string) (map[string]int, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapIntE(v)
}

// GetStringMapBool returns a map of string to boolean
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapBool(key string) (map[string]bool, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapBoolE(v)
}

// GetTime returns a time value if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetTime(key string) (time.Time, error) {
	v, ok := p.data[key]
	if !ok {
		return time.Time{}, &errKeyNotFound{key}
	}
	return cast.ToTimeE(v)
}

// GetDuration returns a duration value if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetDuration(key string) (time.Duration, error) {
	v, ok := p.data[key]
	if !ok {
		return 0, &errKeyNotFound{key}
	}
	return cast.ToDurationE(v)
}
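For context on what this deletion removes: Payload was the map-backed accessor that handlers used to read typed values out of a task. A short example of how the deleted getters were typically called from a handler under the old API (the handler name and payload keys are illustrative only):

package worker

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

// handleWelcomeEmail shows the style of handler the removed Payload API
// supported: typed getters pull values out of the map-based task payload.
func handleWelcomeEmail(ctx context.Context, t *asynq.Task) error {
	userID, err := t.Payload.GetInt("user_id")
	if err != nil {
		return err
	}
	name, err := t.Payload.GetString("name")
	if err != nil {
		return err
	}
	log.Printf("sending welcome email to %s (id=%d)", name, userID)
	return nil
}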
651	payload_test.go
@@ -1,651 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package asynq
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
|
||||||
h "github.com/hibiken/asynq/internal/asynqtest"
|
|
||||||
"github.com/hibiken/asynq/internal/base"
|
|
||||||
)
|
|
||||||
|
|
||||||
type payloadTest struct {
|
|
||||||
data map[string]interface{}
|
|
||||||
key string
|
|
||||||
nonkey string
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadString(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"name": "gopher"},
|
|
||||||
key: "name",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetString(tc.key)
|
|
||||||
if err != nil || got != tc.data[tc.key] {
|
|
||||||
t.Errorf("Payload.GetString(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetString(tc.key)
|
|
||||||
if err != nil || got != tc.data[tc.key] {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetString(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetString(tc.nonkey)
|
|
||||||
if err == nil || got != "" {
|
|
||||||
t.Errorf("Payload.GetString(%q) = %v, %v; want '', error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadInt(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"user_id": 42},
|
|
||||||
key: "user_id",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetInt(tc.key)
|
|
||||||
if err != nil || got != tc.data[tc.key] {
|
|
||||||
t.Errorf("Payload.GetInt(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetInt(tc.key)
|
|
||||||
if err != nil || got != tc.data[tc.key] {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetInt(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetInt(tc.nonkey)
|
|
||||||
if err == nil || got != 0 {
|
|
||||||
t.Errorf("Payload.GetInt(%q) = %v, %v; want 0, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadFloat64(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"pi": 3.14},
|
|
||||||
key: "pi",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetFloat64(tc.key)
|
|
||||||
if err != nil || got != tc.data[tc.key] {
|
|
||||||
t.Errorf("Payload.GetFloat64(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetFloat64(tc.key)
|
|
||||||
if err != nil || got != tc.data[tc.key] {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetFloat64(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetFloat64(tc.nonkey)
|
|
||||||
if err == nil || got != 0 {
|
|
||||||
t.Errorf("Payload.GetFloat64(%q) = %v, %v; want 0, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadBool(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"enabled": true},
|
|
||||||
key: "enabled",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetBool(tc.key)
|
|
||||||
if err != nil || got != tc.data[tc.key] {
|
|
||||||
t.Errorf("Payload.GetBool(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetBool(tc.key)
|
|
||||||
if err != nil || got != tc.data[tc.key] {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetBool(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetBool(tc.nonkey)
|
|
||||||
if err == nil || got != false {
|
|
||||||
t.Errorf("Payload.GetBool(%q) = %v, %v; want false, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadStringSlice(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"names": []string{"luke", "rey", "anakin"}},
|
|
||||||
key: "names",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetStringSlice(tc.key)
|
|
||||||
diff := cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("Payload.GetStringSlice(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetStringSlice(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetStringSlice(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetStringSlice(tc.nonkey)
|
|
||||||
if err == nil || got != nil {
|
|
||||||
t.Errorf("Payload.GetStringSlice(%q) = %v, %v; want nil, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadIntSlice(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"nums": []int{9, 8, 7}},
|
|
||||||
key: "nums",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetIntSlice(tc.key)
|
|
||||||
diff := cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("Payload.GetIntSlice(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetIntSlice(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetIntSlice(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetIntSlice(tc.nonkey)
|
|
||||||
if err == nil || got != nil {
|
|
||||||
t.Errorf("Payload.GetIntSlice(%q) = %v, %v; want nil, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadStringMap(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"user": map[string]interface{}{"name": "Jon Doe", "score": 2.2}},
|
|
||||||
key: "user",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetStringMap(tc.key)
|
|
||||||
diff := cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("Payload.GetStringMap(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetStringMap(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetStringMap(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetStringMap(tc.nonkey)
|
|
||||||
if err == nil || got != nil {
|
|
||||||
t.Errorf("Payload.GetStringMap(%q) = %v, %v; want nil, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadStringMapString(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"address": map[string]string{"line": "123 Main St", "city": "San Francisco", "state": "CA"}},
|
|
||||||
key: "address",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetStringMapString(tc.key)
|
|
||||||
diff := cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("Payload.GetStringMapString(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetStringMapString(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetStringMapString(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetStringMapString(tc.nonkey)
|
|
||||||
if err == nil || got != nil {
|
|
||||||
t.Errorf("Payload.GetStringMapString(%q) = %v, %v; want nil, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadStringMapStringSlice(t *testing.T) {
|
|
||||||
favs := map[string][]string{
|
|
||||||
"movies": {"forrest gump", "star wars"},
|
|
||||||
"tv_shows": {"game of thrones", "HIMYM", "breaking bad"},
|
|
||||||
}
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"favorites": favs},
|
|
||||||
key: "favorites",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetStringMapStringSlice(tc.key)
|
|
||||||
diff := cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("Payload.GetStringMapStringSlice(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetStringMapStringSlice(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetStringMapStringSlice(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetStringMapStringSlice(tc.nonkey)
|
|
||||||
if err == nil || got != nil {
|
|
||||||
t.Errorf("Payload.GetStringMapStringSlice(%q) = %v, %v; want nil, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadStringMapInt(t *testing.T) {
|
|
||||||
counter := map[string]int{
|
|
||||||
"a": 1,
|
|
||||||
"b": 101,
|
|
||||||
"c": 42,
|
|
||||||
}
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"counts": counter},
|
|
||||||
key: "counts",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetStringMapInt(tc.key)
|
|
||||||
diff := cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("Payload.GetStringMapInt(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetStringMapInt(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetStringMapInt(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetStringMapInt(tc.nonkey)
|
|
||||||
if err == nil || got != nil {
|
|
||||||
t.Errorf("Payload.GetStringMapInt(%q) = %v, %v; want nil, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadStringMapBool(t *testing.T) {
|
|
||||||
features := map[string]bool{
|
|
||||||
"A": false,
|
|
||||||
"B": true,
|
|
||||||
"C": true,
|
|
||||||
}
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"features": features},
|
|
||||||
key: "features",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetStringMapBool(tc.key)
|
|
||||||
diff := cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("Payload.GetStringMapBool(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetStringMapBool(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetStringMapBool(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetStringMapBool(tc.nonkey)
|
|
||||||
if err == nil || got != nil {
|
|
||||||
t.Errorf("Payload.GetStringMapBool(%q) = %v, %v; want nil, error",
|
|
||||||
tc.key, got, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadTime(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"current": time.Now()},
|
|
||||||
key: "current",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetTime(tc.key)
|
|
||||||
diff := cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("Payload.GetTime(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetTime(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetTime(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetTime(tc.nonkey)
|
|
||||||
if err == nil || !got.IsZero() {
|
|
||||||
t.Errorf("Payload.GetTime(%q) = %v, %v; want %v, error",
|
|
||||||
tc.key, got, err, time.Time{})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadDuration(t *testing.T) {
|
|
||||||
tests := []payloadTest{
|
|
||||||
{
|
|
||||||
data: map[string]interface{}{"duration": 15 * time.Minute},
|
|
||||||
key: "duration",
|
|
||||||
nonkey: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
payload := Payload{tc.data}
|
|
||||||
|
|
||||||
got, err := payload.GetDuration(tc.key)
|
|
||||||
diff := cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("Payload.GetDuration(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetDuration(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetDuration(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetDuration(tc.nonkey)
|
|
||||||
if err == nil || got != 0 {
|
|
||||||
t.Errorf("Payload.GetDuration(%q) = %v, %v; want %v, error",
|
|
||||||
tc.key, got, err, time.Duration(0))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadHas(t *testing.T) {
|
|
||||||
payload := Payload{map[string]interface{}{
|
|
||||||
"user_id": 123,
|
|
||||||
}}
|
|
||||||
|
|
||||||
if !payload.Has("user_id") {
|
|
||||||
t.Errorf("Payload.Has(%q) = false, want true", "user_id")
|
|
||||||
}
|
|
||||||
if payload.Has("name") {
|
|
||||||
t.Errorf("Payload.Has(%q) = true, want false", "name")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
243
processor.go
243
processor.go
@@ -8,13 +8,16 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"runtime"
|
||||||
|
"runtime/debug"
|
||||||
"sort"
|
"sort"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hibiken/asynq/internal/base"
|
"github.com/hibiken/asynq/internal/base"
|
||||||
|
"github.com/hibiken/asynq/internal/errors"
|
||||||
"github.com/hibiken/asynq/internal/log"
|
"github.com/hibiken/asynq/internal/log"
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -22,8 +25,6 @@ type processor struct {
|
|||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
broker base.Broker
|
broker base.Broker
|
||||||
|
|
||||||
ss *base.ServerState
|
|
||||||
|
|
||||||
handler Handler
|
handler Handler
|
||||||
|
|
||||||
queueConfig map[string]int
|
queueConfig map[string]int
|
||||||
@@ -31,7 +32,8 @@ type processor struct {
|
|||||||
// orderedQueues is set only in strict-priority mode.
|
// orderedQueues is set only in strict-priority mode.
|
||||||
orderedQueues []string
|
orderedQueues []string
|
||||||
|
|
||||||
retryDelayFunc retryDelayFunc
|
retryDelayFunc RetryDelayFunc
|
||||||
|
isFailureFunc func(error) bool
|
||||||
|
|
||||||
errHandler ErrorHandler
|
errHandler ErrorHandler
|
||||||
|
|
||||||
@@ -52,53 +54,61 @@ type processor struct {
|
|||||||
done chan struct{}
|
done chan struct{}
|
||||||
once sync.Once
|
once sync.Once
|
||||||
|
|
||||||
// abort channel is closed when the shutdown of the "processor" goroutine starts.
|
// quit channel is closed when the shutdown of the "processor" goroutine starts.
|
||||||
abort chan struct{}
|
|
||||||
|
|
||||||
// quit channel communicates to the in-flight worker goroutines to stop.
|
|
||||||
quit chan struct{}
|
quit chan struct{}
|
||||||
|
|
||||||
// cancelations is a set of cancel functions for all in-progress tasks.
|
// abort channel communicates to the in-flight worker goroutines to stop.
|
||||||
cancelations *base.Cancelations
|
abort chan struct{}
|
||||||
}
|
|
||||||
|
|
||||||
type retryDelayFunc func(n int, err error, task *Task) time.Duration
|
// cancelations is a set of cancel functions for all active tasks.
|
||||||
|
cancelations *base.Cancelations
|
||||||
|
|
||||||
|
starting chan<- *workerInfo
|
||||||
|
finished chan<- *base.TaskMessage
|
||||||
|
}
|
||||||
|
|
||||||
type processorParams struct {
|
type processorParams struct {
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
broker base.Broker
|
broker base.Broker
|
||||||
ss *base.ServerState
|
retryDelayFunc RetryDelayFunc
|
||||||
retryDelayFunc retryDelayFunc
|
isFailureFunc func(error) bool
|
||||||
syncCh chan<- *syncRequest
|
syncCh chan<- *syncRequest
|
||||||
cancelations *base.Cancelations
|
cancelations *base.Cancelations
|
||||||
|
concurrency int
|
||||||
|
queues map[string]int
|
||||||
|
strictPriority bool
|
||||||
errHandler ErrorHandler
|
errHandler ErrorHandler
|
||||||
shutdownTimeout time.Duration
|
shutdownTimeout time.Duration
|
||||||
|
starting chan<- *workerInfo
|
||||||
|
finished chan<- *base.TaskMessage
|
||||||
}
|
}
|
||||||
|
|
||||||
// newProcessor constructs a new processor.
|
// newProcessor constructs a new processor.
|
||||||
func newProcessor(params processorParams) *processor {
|
func newProcessor(params processorParams) *processor {
|
||||||
info := params.ss.GetInfo()
|
queues := normalizeQueues(params.queues)
|
||||||
qcfg := normalizeQueueCfg(info.Queues)
|
|
||||||
orderedQueues := []string(nil)
|
orderedQueues := []string(nil)
|
||||||
if info.StrictPriority {
|
if params.strictPriority {
|
||||||
orderedQueues = sortByPriority(qcfg)
|
orderedQueues = sortByPriority(queues)
|
||||||
}
|
}
|
||||||
return &processor{
|
return &processor{
|
||||||
logger: params.logger,
|
logger: params.logger,
|
||||||
broker: params.broker,
|
broker: params.broker,
|
||||||
ss: params.ss,
|
queueConfig: queues,
|
||||||
queueConfig: qcfg,
|
|
||||||
orderedQueues: orderedQueues,
|
orderedQueues: orderedQueues,
|
||||||
retryDelayFunc: params.retryDelayFunc,
|
retryDelayFunc: params.retryDelayFunc,
|
||||||
|
isFailureFunc: params.isFailureFunc,
|
||||||
syncRequestCh: params.syncCh,
|
syncRequestCh: params.syncCh,
|
||||||
cancelations: params.cancelations,
|
cancelations: params.cancelations,
|
||||||
errLogLimiter: rate.NewLimiter(rate.Every(3*time.Second), 1),
|
errLogLimiter: rate.NewLimiter(rate.Every(3*time.Second), 1),
|
||||||
sema: make(chan struct{}, info.Concurrency),
|
sema: make(chan struct{}, params.concurrency),
|
||||||
done: make(chan struct{}),
|
done: make(chan struct{}),
|
||||||
abort: make(chan struct{}),
|
|
||||||
quit: make(chan struct{}),
|
quit: make(chan struct{}),
|
||||||
|
abort: make(chan struct{}),
|
||||||
errHandler: params.errHandler,
|
errHandler: params.errHandler,
|
||||||
handler: HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
|
handler: HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
|
||||||
|
shutdownTimeout: params.shutdownTimeout,
|
||||||
|
starting: params.starting,
|
||||||
|
finished: params.finished,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -108,37 +118,28 @@ func (p *processor) stop() {
|
|||||||
p.once.Do(func() {
|
p.once.Do(func() {
|
||||||
p.logger.Debug("Processor shutting down...")
|
p.logger.Debug("Processor shutting down...")
|
||||||
// Unblock if processor is waiting for sema token.
|
// Unblock if processor is waiting for sema token.
|
||||||
close(p.abort)
|
close(p.quit)
|
||||||
// Signal the processor goroutine to stop processing tasks
|
// Signal the processor goroutine to stop processing tasks
|
||||||
// from the queue.
|
// from the queue.
|
||||||
p.done <- struct{}{}
|
p.done <- struct{}{}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: once terminated, processor cannot be re-started.
|
// NOTE: once shutdown, processor cannot be re-started.
|
||||||
func (p *processor) terminate() {
|
func (p *processor) shutdown() {
|
||||||
p.stop()
|
p.stop()
|
||||||
|
|
||||||
time.AfterFunc(p.shutdownTimeout, func() { close(p.quit) })
|
time.AfterFunc(p.shutdownTimeout, func() { close(p.abort) })
|
||||||
|
|
||||||
p.logger.Info("Waiting for all workers to finish...")
|
p.logger.Info("Waiting for all workers to finish...")
|
||||||
|
|
||||||
// send cancellation signal to all in-progress task handlers
|
|
||||||
for _, cancel := range p.cancelations.GetAll() {
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
|
|
||||||
// block until all workers have released the token
|
// block until all workers have released the token
|
||||||
for i := 0; i < cap(p.sema); i++ {
|
for i := 0; i < cap(p.sema); i++ {
|
||||||
p.sema <- struct{}{}
|
p.sema <- struct{}{}
|
||||||
}
|
}
|
||||||
p.logger.Info("All workers have finished")
|
p.logger.Info("All workers have finished")
|
||||||
p.restore() // move any unfinished tasks back to the queue.
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *processor) start(wg *sync.WaitGroup) {
|
func (p *processor) start(wg *sync.WaitGroup) {
|
||||||
// NOTE: The call to "restore" needs to complete before starting
|
|
||||||
// the processor goroutine.
|
|
||||||
p.restore()
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
@@ -157,137 +158,167 @@ func (p *processor) start(wg *sync.WaitGroup) {
|
|||||||
// exec pulls a task out of the queue and starts a worker goroutine to
|
// exec pulls a task out of the queue and starts a worker goroutine to
|
||||||
// process the task.
|
// process the task.
|
||||||
func (p *processor) exec() {
|
func (p *processor) exec() {
|
||||||
|
select {
|
||||||
|
case <-p.quit:
|
||||||
|
return
|
||||||
|
case p.sema <- struct{}{}: // acquire token
|
||||||
qnames := p.queues()
|
qnames := p.queues()
|
||||||
msg, err := p.broker.Dequeue(qnames...)
|
msg, deadline, err := p.broker.Dequeue(qnames...)
|
||||||
switch {
|
switch {
|
||||||
case err == rdb.ErrNoProcessableTask:
|
case errors.Is(err, errors.ErrNoProcessableTask):
|
||||||
// queues are empty, this is a normal behavior.
|
|
||||||
if len(qnames) > 1 {
|
|
||||||
// sleep to avoid slamming redis and let scheduler move tasks into queues.
|
|
||||||
// Note: With multiple queues, we are not using blocking pop operation and
|
|
||||||
// polling queues instead. This adds significant load to redis.
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
}
|
|
||||||
p.logger.Debug("All queues are empty")
|
p.logger.Debug("All queues are empty")
|
||||||
|
// Queues are empty, this is a normal behavior.
|
||||||
|
// Sleep to avoid slamming redis and let scheduler move tasks into queues.
|
||||||
|
// Note: We are not using blocking pop operation and polling queues instead.
|
||||||
|
// This adds significant load to redis.
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
<-p.sema // release token
|
||||||
return
|
return
|
||||||
case err != nil:
|
case err != nil:
|
||||||
if p.errLogLimiter.Allow() {
|
if p.errLogLimiter.Allow() {
|
||||||
p.logger.Errorf("Dequeue error: %v", err)
|
p.logger.Errorf("Dequeue error: %v", err)
|
||||||
}
|
}
|
||||||
|
<-p.sema // release token
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
select {
|
p.starting <- &workerInfo{msg, time.Now(), deadline}
|
||||||
case <-p.abort:
|
|
||||||
// shutdown is starting, return immediately after requeuing the message.
|
|
||||||
p.requeue(msg)
|
|
||||||
return
|
|
||||||
case p.sema <- struct{}{}: // acquire token
|
|
||||||
p.ss.AddWorkerStats(msg, time.Now())
|
|
||||||
go func() {
|
go func() {
|
||||||
defer func() {
|
defer func() {
|
||||||
p.ss.DeleteWorkerStats(msg)
|
p.finished <- msg
|
||||||
<-p.sema // release token
|
<-p.sema // release token
|
||||||
}()
|
}()
|
||||||
|
|
||||||
ctx, cancel := createContext(msg)
|
ctx, cancel := createContext(msg, deadline)
|
||||||
p.cancelations.Add(msg.ID.String(), cancel)
|
p.cancelations.Add(msg.ID.String(), cancel)
|
||||||
defer func() {
|
defer func() {
|
||||||
cancel()
|
cancel()
|
||||||
p.cancelations.Delete(msg.ID.String())
|
p.cancelations.Delete(msg.ID.String())
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
// check context before starting a worker goroutine.
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// already canceled (e.g. deadline exceeded).
|
||||||
|
p.retryOrArchive(ctx, msg, ctx.Err())
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
resCh := make(chan error, 1)
|
resCh := make(chan error, 1)
|
||||||
task := NewTask(msg.Type, msg.Payload)
|
go func() {
|
||||||
go func() { resCh <- perform(ctx, task, p.handler) }()
|
resCh <- p.perform(ctx, NewTask(msg.Type, msg.Payload))
|
||||||
|
}()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-p.quit:
|
case <-p.abort:
|
||||||
// time is up, quit this worker goroutine.
|
// time is up, push the message back to queue and quit this worker goroutine.
|
||||||
p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
|
p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
|
||||||
|
p.requeue(msg)
|
||||||
|
return
|
||||||
|
case <-ctx.Done():
|
||||||
|
p.retryOrArchive(ctx, msg, ctx.Err())
|
||||||
return
|
return
|
||||||
case resErr := <-resCh:
|
case resErr := <-resCh:
|
||||||
// Note: One of three things should happen.
|
// Note: One of three things should happen.
|
||||||
// 1) Done -> Removes the message from InProgress
|
// 1) Done -> Removes the message from Active
|
||||||
// 2) Retry -> Removes the message from InProgress & Adds the message to Retry
|
// 2) Retry -> Removes the message from Active & Adds the message to Retry
|
||||||
// 3) Kill -> Removes the message from InProgress & Adds the message to Dead
|
// 3) Archive -> Removes the message from Active & Adds the message to archive
|
||||||
if resErr != nil {
|
if resErr != nil {
|
||||||
if p.errHandler != nil {
|
p.retryOrArchive(ctx, msg, resErr)
|
||||||
p.errHandler.HandleError(task, resErr, msg.Retried, msg.Retry)
|
|
||||||
}
|
|
||||||
if msg.Retried >= msg.Retry {
|
|
||||||
p.kill(msg, resErr)
|
|
||||||
} else {
|
|
||||||
p.retry(msg, resErr)
|
|
||||||
}
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
p.markAsDone(msg)
|
p.markAsDone(ctx, msg)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// restore moves all tasks from "in-progress" back to queue
|
|
||||||
// to restore all unfinished tasks.
|
|
||||||
func (p *processor) restore() {
|
|
||||||
n, err := p.broker.RequeueAll()
|
|
||||||
if err != nil {
|
|
||||||
p.logger.Errorf("Could not restore unfinished tasks: %v", err)
|
|
||||||
}
|
|
||||||
if n > 0 {
|
|
||||||
p.logger.Infof("Restored %d unfinished tasks back to queue", n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *processor) requeue(msg *base.TaskMessage) {
|
func (p *processor) requeue(msg *base.TaskMessage) {
|
||||||
err := p.broker.Requeue(msg)
|
err := p.broker.Requeue(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err)
|
p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err)
|
||||||
|
} else {
|
||||||
|
p.logger.Infof("Pushed task id=%s back to queue", msg.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *processor) markAsDone(msg *base.TaskMessage) {
|
func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
|
||||||
err := p.broker.Done(msg)
|
err := p.broker.Done(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errMsg := fmt.Sprintf("Could not remove task id=%s from %q", msg.ID, base.InProgressQueue)
|
errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
|
||||||
|
deadline, ok := ctx.Deadline()
|
||||||
|
if !ok {
|
||||||
|
panic("asynq: internal error: missing deadline in context")
|
||||||
|
}
|
||||||
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
||||||
p.syncRequestCh <- &syncRequest{
|
p.syncRequestCh <- &syncRequest{
|
||||||
fn: func() error {
|
fn: func() error {
|
||||||
return p.broker.Done(msg)
|
return p.broker.Done(msg)
|
||||||
},
|
},
|
||||||
errMsg: errMsg,
|
errMsg: errMsg,
|
||||||
|
deadline: deadline,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *processor) retry(msg *base.TaskMessage, e error) {
|
// SkipRetry is used as a return value from Handler.ProcessTask to indicate that
|
||||||
|
// the task should not be retried and should be archived instead.
|
||||||
|
var SkipRetry = errors.New("skip retry for the task")
|
||||||
|
|
||||||
|
func (p *processor) retryOrArchive(ctx context.Context, msg *base.TaskMessage, err error) {
|
||||||
|
if p.errHandler != nil {
|
||||||
|
p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
|
||||||
|
}
|
||||||
|
if !p.isFailureFunc(err) {
|
||||||
|
// retry the task without marking it as failed
|
||||||
|
p.retry(ctx, msg, err, false /*isFailure*/)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) {
|
||||||
|
p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
|
||||||
|
p.archive(ctx, msg, err)
|
||||||
|
} else {
|
||||||
|
p.retry(ctx, msg, err, true /*isFailure*/)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error, isFailure bool) {
|
||||||
d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
|
d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
|
||||||
retryAt := time.Now().Add(d)
|
retryAt := time.Now().Add(d)
|
||||||
err := p.broker.Retry(msg, retryAt, e.Error())
|
err := p.broker.Retry(msg, retryAt, e.Error(), isFailure)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.InProgressQueue, base.RetryQueue)
|
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
|
||||||
|
deadline, ok := ctx.Deadline()
|
||||||
|
if !ok {
|
||||||
|
panic("asynq: internal error: missing deadline in context")
|
||||||
|
}
|
||||||
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
||||||
p.syncRequestCh <- &syncRequest{
|
p.syncRequestCh <- &syncRequest{
|
||||||
fn: func() error {
|
fn: func() error {
|
||||||
return p.broker.Retry(msg, retryAt, e.Error())
|
return p.broker.Retry(msg, retryAt, e.Error(), isFailure)
|
||||||
},
|
},
|
||||||
errMsg: errMsg,
|
errMsg: errMsg,
|
||||||
|
deadline: deadline,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *processor) kill(msg *base.TaskMessage, e error) {
|
func (p *processor) archive(ctx context.Context, msg *base.TaskMessage, e error) {
|
||||||
p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
|
err := p.broker.Archive(msg, e.Error())
|
||||||
err := p.broker.Kill(msg, e.Error())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.InProgressQueue, base.DeadQueue)
|
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue))
|
||||||
|
deadline, ok := ctx.Deadline()
|
||||||
|
if !ok {
|
||||||
|
panic("asynq: internal error: missing deadline in context")
|
||||||
|
}
|
||||||
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
p.logger.Warnf("%s; Will retry syncing", errMsg)
|
||||||
p.syncRequestCh <- &syncRequest{
|
p.syncRequestCh <- &syncRequest{
|
||||||
fn: func() error {
|
fn: func() error {
|
||||||
return p.broker.Kill(msg, e.Error())
|
return p.broker.Archive(msg, e.Error())
|
||||||
},
|
},
|
||||||
errMsg: errMsg,
|
errMsg: errMsg,
|
||||||
|
deadline: deadline,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -322,13 +353,26 @@ func (p *processor) queues() []string {
|
|||||||
// perform calls the handler with the given task.
|
// perform calls the handler with the given task.
|
||||||
// If the call returns without panic, it simply returns the value,
|
// If the call returns without panic, it simply returns the value,
|
||||||
// otherwise, it recovers from panic and returns an error.
|
// otherwise, it recovers from panic and returns an error.
|
||||||
func perform(ctx context.Context, task *Task, h Handler) (err error) {
|
func (p *processor) perform(ctx context.Context, task *Task) (err error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if x := recover(); x != nil {
|
if x := recover(); x != nil {
|
||||||
|
p.logger.Errorf("recovering from panic. See the stack trace below for details:\n%s", string(debug.Stack()))
|
||||||
|
_, file, line, ok := runtime.Caller(1) // skip the first frame (panic itself)
|
||||||
|
if ok && strings.Contains(file, "runtime/") {
|
||||||
|
// The panic came from the runtime, most likely due to incorrect
|
||||||
|
// map/slice usage. The parent frame should have the real trigger.
|
||||||
|
_, file, line, ok = runtime.Caller(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Include the file and line number info in the error, if runtime.Caller returned ok.
|
||||||
|
if ok {
|
||||||
|
err = fmt.Errorf("panic [%s:%d]: %v", file, line, x)
|
||||||
|
} else {
|
||||||
err = fmt.Errorf("panic: %v", x)
|
err = fmt.Errorf("panic: %v", x)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
return h.ProcessTask(ctx, task)
|
return p.handler.ProcessTask(ctx, task)
|
||||||
}
|
}
|
||||||
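The comment above describes the recover-and-wrap behavior of perform; a standalone sketch of the same pattern, stripped of the file/line lookup (the helper name safeCall is illustrative and assumes the usual fmt import):
// safeCall runs fn and converts a panic into an ordinary error, so a
// panicking handler cannot crash the worker goroutine that invoked it.
func safeCall(fn func() error) (err error) {
	defer func() {
		if x := recover(); x != nil {
			err = fmt.Errorf("panic: %v", x)
		}
	}()
	return fn()
}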
|
|
||||||
// uniq dedupes elements and returns a slice of unique names of length l.
|
// uniq dedupes elements and returns a slice of unique names of length l.
|
||||||
@@ -374,16 +418,15 @@ func (x byPriority) Len() int { return len(x) }
|
|||||||
func (x byPriority) Less(i, j int) bool { return x[i].priority < x[j].priority }
|
func (x byPriority) Less(i, j int) bool { return x[i].priority < x[j].priority }
|
||||||
func (x byPriority) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
func (x byPriority) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
|
|
||||||
// normalizeQueueCfg divides priority numbers by their
|
// normalizeQueues divides priority numbers by their greatest common divisor.
|
||||||
// greatest common divisor.
|
func normalizeQueues(queues map[string]int) map[string]int {
|
||||||
func normalizeQueueCfg(queueCfg map[string]int) map[string]int {
|
|
||||||
var xs []int
|
var xs []int
|
||||||
for _, x := range queueCfg {
|
for _, x := range queues {
|
||||||
xs = append(xs, x)
|
xs = append(xs, x)
|
||||||
}
|
}
|
||||||
d := gcd(xs...)
|
d := gcd(xs...)
|
||||||
res := make(map[string]int)
|
res := make(map[string]int)
|
||||||
for q, x := range queueCfg {
|
for q, x := range queues {
|
||||||
res[q] = x / d
|
res[q] = x / d
|
||||||
}
|
}
|
||||||
return res
|
return res
|
||||||
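normalizeQueues above relies on a gcd helper that sits outside this hunk; a typical Euclidean implementation looks like the following (a sketch, not necessarily the exact code elsewhere in the file):
// gcd returns the greatest common divisor of the given integers.
func gcd(xs ...int) int {
	gcd2 := func(a, b int) int {
		for b != 0 {
			a, b = b, a%b
		}
		return a
	}
	res := xs[0]
	for _, x := range xs[1:] {
		res = gcd2(res, x)
		if res == 1 {
			return 1
		}
	}
	return res
}
With such a helper, normalizeQueues(map[string]int{"critical": 60, "default": 20, "low": 20}) yields {"critical": 3, "default": 1, "low": 1}.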
|
|||||||
@@ -6,6 +6,7 @@ package asynq
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -13,20 +14,43 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
"github.com/google/go-cmp/cmp/cmpopts"
|
|
||||||
h "github.com/hibiken/asynq/internal/asynqtest"
|
h "github.com/hibiken/asynq/internal/asynqtest"
|
||||||
"github.com/hibiken/asynq/internal/base"
|
"github.com/hibiken/asynq/internal/base"
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestProcessorSuccess(t *testing.T) {
|
// fakeHeartbeater receives from the starting and finished channels and does nothing.
|
||||||
|
func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-starting:
|
||||||
|
case <-finished:
|
||||||
|
case <-done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fakeSyncer receives from the sync channel and does nothing.
|
||||||
|
func fakeSyncer(syncCh <-chan *syncRequest, done <-chan struct{}) {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-syncCh:
|
||||||
|
case <-done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProcessorSuccessWithSingleQueue(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
rdbClient := rdb.NewRDB(r)
|
rdbClient := rdb.NewRDB(r)
|
||||||
|
|
||||||
m1 := h.NewTaskMessage("send_email", nil)
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
m2 := h.NewTaskMessage("gen_thumbnail", nil)
|
m2 := h.NewTaskMessage("task2", nil)
|
||||||
m3 := h.NewTaskMessage("reindex", nil)
|
m3 := h.NewTaskMessage("task3", nil)
|
||||||
m4 := h.NewTaskMessage("sync", nil)
|
m4 := h.NewTaskMessage("task4", nil)
|
||||||
|
|
||||||
t1 := NewTask(m1.Type, m1.Payload)
|
t1 := NewTask(m1.Type, m1.Payload)
|
||||||
t2 := NewTask(m2.Type, m2.Payload)
|
t2 := NewTask(m2.Type, m2.Payload)
|
||||||
@@ -34,17 +58,17 @@ func TestProcessorSuccess(t *testing.T) {
|
|||||||
t4 := NewTask(m4.Type, m4.Payload)
|
t4 := NewTask(m4.Type, m4.Payload)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
enqueued []*base.TaskMessage // initial default queue state
|
pending []*base.TaskMessage // initial default queue state
|
||||||
incoming []*base.TaskMessage // tasks to be enqueued during run
|
incoming []*base.TaskMessage // tasks to be enqueued during run
|
||||||
wantProcessed []*Task // tasks to be processed at the end
|
wantProcessed []*Task // tasks to be processed at the end
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
enqueued: []*base.TaskMessage{m1},
|
pending: []*base.TaskMessage{m1},
|
||||||
incoming: []*base.TaskMessage{m2, m3, m4},
|
incoming: []*base.TaskMessage{m2, m3, m4},
|
||||||
wantProcessed: []*Task{t1, t2, t3, t4},
|
wantProcessed: []*Task{t1, t2, t3, t4},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
enqueued: []*base.TaskMessage{},
|
pending: []*base.TaskMessage{},
|
||||||
incoming: []*base.TaskMessage{m1},
|
incoming: []*base.TaskMessage{m1},
|
||||||
wantProcessed: []*Task{t1},
|
wantProcessed: []*Task{t1},
|
||||||
},
|
},
|
||||||
@@ -52,7 +76,7 @@ func TestProcessorSuccess(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
h.FlushDB(t, r) // clean up db before each test case.
|
h.FlushDB(t, r) // clean up db before each test case.
|
||||||
h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.
|
h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName) // initialize default queue.
|
||||||
|
|
||||||
// instantiate a new processor
|
// instantiate a new processor
|
||||||
var mu sync.Mutex
|
var mu sync.Mutex
|
||||||
@@ -63,16 +87,27 @@ func TestProcessorSuccess(t *testing.T) {
|
|||||||
processed = append(processed, task)
|
processed = append(processed, task)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
ss := base.NewServerState("localhost", 1234, 10, defaultQueueConfig, false)
|
starting := make(chan *workerInfo)
|
||||||
|
finished := make(chan *base.TaskMessage)
|
||||||
|
syncCh := make(chan *syncRequest)
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer func() { close(done) }()
|
||||||
|
go fakeHeartbeater(starting, finished, done)
|
||||||
|
go fakeSyncer(syncCh, done)
|
||||||
p := newProcessor(processorParams{
|
p := newProcessor(processorParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: rdbClient,
|
broker: rdbClient,
|
||||||
ss: ss,
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
retryDelayFunc: defaultDelayFunc,
|
isFailureFunc: defaultIsFailureFunc,
|
||||||
syncCh: nil,
|
syncCh: syncCh,
|
||||||
cancelations: base.NewCancelations(),
|
cancelations: base.NewCancelations(),
|
||||||
|
concurrency: 10,
|
||||||
|
queues: defaultQueueConfig,
|
||||||
|
strictPriority: false,
|
||||||
errHandler: nil,
|
errHandler: nil,
|
||||||
shutdownTimeout: defaultShutdownTimeout,
|
shutdownTimeout: defaultShutdownTimeout,
|
||||||
|
starting: starting,
|
||||||
|
finished: finished,
|
||||||
})
|
})
|
||||||
p.handler = HandlerFunc(handler)
|
p.handler = HandlerFunc(handler)
|
||||||
|
|
||||||
@@ -80,25 +115,200 @@ func TestProcessorSuccess(t *testing.T) {
|
|||||||
for _, msg := range tc.incoming {
|
for _, msg := range tc.incoming {
|
||||||
err := rdbClient.Enqueue(msg)
|
err := rdbClient.Enqueue(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
p.terminate()
|
p.shutdown()
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
time.Sleep(time.Second) // wait for one second to allow all enqueued tasks to be processed.
|
time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
|
||||||
p.terminate()
|
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||||
|
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
|
||||||
|
}
|
||||||
|
p.shutdown()
|
||||||
|
|
||||||
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Payload{})); diff != "" {
|
mu.Lock()
|
||||||
|
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
|
||||||
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
|
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
|
||||||
}
|
}
|
||||||
|
mu.Unlock()
|
||||||
if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
|
|
||||||
t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
|
||||||
|
var (
|
||||||
|
r = setup(t)
|
||||||
|
rdbClient = rdb.NewRDB(r)
|
||||||
|
|
||||||
|
m1 = h.NewTaskMessage("task1", nil)
|
||||||
|
m2 = h.NewTaskMessage("task2", nil)
|
||||||
|
m3 = h.NewTaskMessageWithQueue("task3", nil, "high")
|
||||||
|
m4 = h.NewTaskMessageWithQueue("task4", nil, "low")
|
||||||
|
|
||||||
|
t1 = NewTask(m1.Type, m1.Payload)
|
||||||
|
t2 = NewTask(m2.Type, m2.Payload)
|
||||||
|
t3 = NewTask(m3.Type, m3.Payload)
|
||||||
|
t4 = NewTask(m4.Type, m4.Payload)
|
||||||
|
)
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
pending map[string][]*base.TaskMessage
|
||||||
|
queues []string // list of queues to consume the tasks from
|
||||||
|
wantProcessed []*Task // tasks to be processed at the end
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
"high": {m3},
|
||||||
|
"low": {m4},
|
||||||
|
},
|
||||||
|
queues: []string{"default", "high", "low"},
|
||||||
|
wantProcessed: []*Task{t1, t2, t3, t4},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
// Set up test case.
|
||||||
|
h.FlushDB(t, r)
|
||||||
|
h.SeedAllPendingQueues(t, r, tc.pending)
|
||||||
|
|
||||||
|
// Instantiate a new processor.
|
||||||
|
var mu sync.Mutex
|
||||||
|
var processed []*Task
|
||||||
|
handler := func(ctx context.Context, task *Task) error {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
processed = append(processed, task)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
starting := make(chan *workerInfo)
|
||||||
|
finished := make(chan *base.TaskMessage)
|
||||||
|
syncCh := make(chan *syncRequest)
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer func() { close(done) }()
|
||||||
|
go fakeHeartbeater(starting, finished, done)
|
||||||
|
go fakeSyncer(syncCh, done)
|
||||||
|
p := newProcessor(processorParams{
|
||||||
|
logger: testLogger,
|
||||||
|
broker: rdbClient,
|
||||||
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
|
isFailureFunc: defaultIsFailureFunc,
|
||||||
|
syncCh: syncCh,
|
||||||
|
cancelations: base.NewCancelations(),
|
||||||
|
concurrency: 10,
|
||||||
|
queues: map[string]int{
|
||||||
|
"default": 2,
|
||||||
|
"high": 3,
|
||||||
|
"low": 1,
|
||||||
|
},
|
||||||
|
strictPriority: false,
|
||||||
|
errHandler: nil,
|
||||||
|
shutdownTimeout: defaultShutdownTimeout,
|
||||||
|
starting: starting,
|
||||||
|
finished: finished,
|
||||||
|
})
|
||||||
|
p.handler = HandlerFunc(handler)
|
||||||
|
|
||||||
|
p.start(&sync.WaitGroup{})
|
||||||
|
// Wait for two seconds to allow all pending tasks to be processed.
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
// Make sure no messages are stuck in active list.
|
||||||
|
for _, qname := range tc.queues {
|
||||||
|
if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
|
||||||
|
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.shutdown()
|
||||||
|
|
||||||
|
mu.Lock()
|
||||||
|
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
|
||||||
|
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://github.com/hibiken/asynq/issues/166
|
||||||
|
func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
|
||||||
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
|
rdbClient := rdb.NewRDB(r)
|
||||||
|
|
||||||
|
m1 := h.NewTaskMessage("large_number", h.JSON(map[string]interface{}{"data": 111111111111111111}))
|
||||||
|
t1 := NewTask(m1.Type, m1.Payload)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
pending []*base.TaskMessage // initial default queue state
|
||||||
|
wantProcessed []*Task // tasks to be processed at the end
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
pending: []*base.TaskMessage{m1},
|
||||||
|
wantProcessed: []*Task{t1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
h.FlushDB(t, r) // clean up db before each test case.
|
||||||
|
h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName) // initialize default queue.
|
||||||
|
|
||||||
|
var mu sync.Mutex
|
||||||
|
var processed []*Task
|
||||||
|
handler := func(ctx context.Context, task *Task) error {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
var payload map[string]int
|
||||||
|
if err := json.Unmarshal(task.Payload(), &payload); err != nil {
|
||||||
|
t.Errorf("coult not decode payload: %v", err)
|
||||||
|
}
|
||||||
|
if data, ok := payload["data"]; ok {
|
||||||
|
t.Logf("data == %d", data)
|
||||||
|
} else {
|
||||||
|
t.Errorf("could not get data from payload")
|
||||||
|
}
|
||||||
|
processed = append(processed, task)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
starting := make(chan *workerInfo)
|
||||||
|
finished := make(chan *base.TaskMessage)
|
||||||
|
syncCh := make(chan *syncRequest)
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer func() { close(done) }()
|
||||||
|
go fakeHeartbeater(starting, finished, done)
|
||||||
|
go fakeSyncer(syncCh, done)
|
||||||
|
p := newProcessor(processorParams{
|
||||||
|
logger: testLogger,
|
||||||
|
broker: rdbClient,
|
||||||
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
|
isFailureFunc: defaultIsFailureFunc,
|
||||||
|
syncCh: syncCh,
|
||||||
|
cancelations: base.NewCancelations(),
|
||||||
|
concurrency: 10,
|
||||||
|
queues: defaultQueueConfig,
|
||||||
|
strictPriority: false,
|
||||||
|
errHandler: nil,
|
||||||
|
shutdownTimeout: defaultShutdownTimeout,
|
||||||
|
starting: starting,
|
||||||
|
finished: finished,
|
||||||
|
})
|
||||||
|
p.handler = HandlerFunc(handler)
|
||||||
|
|
||||||
|
p.start(&sync.WaitGroup{})
|
||||||
|
time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
|
||||||
|
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||||
|
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
|
||||||
|
}
|
||||||
|
p.shutdown()
|
||||||
|
|
||||||
|
mu.Lock()
|
||||||
|
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
|
||||||
|
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProcessorRetry(t *testing.T) {
|
func TestProcessorRetry(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
rdbClient := rdb.NewRDB(r)
|
rdbClient := rdb.NewRDB(r)
|
||||||
|
|
||||||
m1 := h.NewTaskMessage("send_email", nil)
|
m1 := h.NewTaskMessage("send_email", nil)
|
||||||
@@ -108,52 +318,63 @@ func TestProcessorRetry(t *testing.T) {
|
|||||||
m4 := h.NewTaskMessage("sync", nil)
|
m4 := h.NewTaskMessage("sync", nil)
|
||||||
|
|
||||||
errMsg := "something went wrong"
|
errMsg := "something went wrong"
|
||||||
// r* is m* after retry
|
wrappedSkipRetry := fmt.Errorf("%s:%w", errMsg, SkipRetry)
|
||||||
r1 := *m1
|
|
||||||
r1.ErrorMsg = errMsg
|
|
||||||
r2 := *m2
|
|
||||||
r2.ErrorMsg = errMsg
|
|
||||||
r2.Retried = m2.Retried + 1
|
|
||||||
r3 := *m3
|
|
||||||
r3.ErrorMsg = errMsg
|
|
||||||
r3.Retried = m3.Retried + 1
|
|
||||||
r4 := *m4
|
|
||||||
r4.ErrorMsg = errMsg
|
|
||||||
r4.Retried = m4.Retried + 1
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
enqueued []*base.TaskMessage // initial default queue state
|
desc string // test description
|
||||||
incoming []*base.TaskMessage // tasks to be enqueued during run
|
pending []*base.TaskMessage // initial default queue state
|
||||||
delay time.Duration // retry delay duration
|
delay time.Duration // retry delay duration
|
||||||
handler Handler // task handler
|
handler Handler // task handler
|
||||||
wait time.Duration // wait duration between starting and stopping processor for this test case
|
wait time.Duration // wait duration between starting and stopping processor for this test case
|
||||||
wantRetry []h.ZSetEntry // tasks in retry queue at the end
|
wantErrMsg string // error message the task should record
|
||||||
wantDead []*base.TaskMessage // tasks in dead queue at the end
|
wantRetry []*base.TaskMessage // tasks in retry queue at the end
|
||||||
|
wantArchived []*base.TaskMessage // tasks in archived queue at the end
|
||||||
wantErrCount int // number of times error handler should be called
|
wantErrCount int // number of times error handler should be called
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
enqueued: []*base.TaskMessage{m1, m2},
|
desc: "Should automatically retry errored tasks",
|
||||||
incoming: []*base.TaskMessage{m3, m4},
|
pending: []*base.TaskMessage{m1, m2, m3, m4},
|
||||||
delay: time.Minute,
|
delay: time.Minute,
|
||||||
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
|
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
|
||||||
return fmt.Errorf(errMsg)
|
return fmt.Errorf(errMsg)
|
||||||
}),
|
}),
|
||||||
wait: time.Second,
|
wait: 2 * time.Second,
|
||||||
wantRetry: []h.ZSetEntry{
|
wantErrMsg: errMsg,
|
||||||
{Msg: &r2, Score: float64(now.Add(time.Minute).Unix())},
|
wantRetry: []*base.TaskMessage{m2, m3, m4},
|
||||||
{Msg: &r3, Score: float64(now.Add(time.Minute).Unix())},
|
wantArchived: []*base.TaskMessage{m1},
|
||||||
{Msg: &r4, Score: float64(now.Add(time.Minute).Unix())},
|
|
||||||
},
|
|
||||||
wantDead: []*base.TaskMessage{&r1},
|
|
||||||
wantErrCount: 4,
|
wantErrCount: 4,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
desc: "Should skip retry errored tasks",
|
||||||
|
pending: []*base.TaskMessage{m1, m2},
|
||||||
|
delay: time.Minute,
|
||||||
|
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
|
||||||
|
return SkipRetry // return SkipRetry without wrapping
|
||||||
|
}),
|
||||||
|
wait: 2 * time.Second,
|
||||||
|
wantErrMsg: SkipRetry.Error(),
|
||||||
|
wantRetry: []*base.TaskMessage{},
|
||||||
|
wantArchived: []*base.TaskMessage{m1, m2},
|
||||||
|
wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "Should skip retry errored tasks (with error wrapping)",
|
||||||
|
pending: []*base.TaskMessage{m1, m2},
|
||||||
|
delay: time.Minute,
|
||||||
|
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
|
||||||
|
return wrappedSkipRetry
|
||||||
|
}),
|
||||||
|
wait: 2 * time.Second,
|
||||||
|
wantErrMsg: wrappedSkipRetry.Error(),
|
||||||
|
wantRetry: []*base.TaskMessage{},
|
||||||
|
wantArchived: []*base.TaskMessage{m1, m2},
|
||||||
|
wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
|
||||||
|
},
|
||||||
}
|
}
|
||||||
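The two new test cases above pin down the SkipRetry semantics: a handler that returns SkipRetry, wrapped or not, sends the task straight to the archived set instead of scheduling another retry. A hedged sketch from a handler's point of view (the task type and payload shape are hypothetical):
func handleImageResize(ctx context.Context, t *Task) error {
	var p map[string]interface{}
	if err := json.Unmarshal(t.Payload(), &p); err != nil {
		// A malformed payload can never succeed; archive it instead of retrying.
		return fmt.Errorf("invalid payload: %v: %w", err, SkipRetry)
	}
	// ... resize the image ...
	return nil
}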
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
h.FlushDB(t, r) // clean up db before each test case.
|
h.FlushDB(t, r) // clean up db before each test case.
|
||||||
h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.
|
h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName) // initialize default queue.
|
||||||
|
|
||||||
// instantiate a new processor
|
// instantiate a new processor
|
||||||
delayFunc := func(n int, e error, t *Task) time.Duration {
|
delayFunc := func(n int, e error, t *Task) time.Duration {
|
||||||
@@ -163,48 +384,67 @@ func TestProcessorRetry(t *testing.T) {
|
|||||||
mu sync.Mutex // guards n
|
mu sync.Mutex // guards n
|
||||||
n int // number of times error handler is called
|
n int // number of times error handler is called
|
||||||
)
|
)
|
||||||
errHandler := func(t *Task, err error, retried, maxRetry int) {
|
errHandler := func(ctx context.Context, t *Task, err error) {
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
defer mu.Unlock()
|
defer mu.Unlock()
|
||||||
n++
|
n++
|
||||||
}
|
}
|
||||||
ss := base.NewServerState("localhost", 1234, 10, defaultQueueConfig, false)
|
starting := make(chan *workerInfo)
|
||||||
|
finished := make(chan *base.TaskMessage)
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer func() { close(done) }()
|
||||||
|
go fakeHeartbeater(starting, finished, done)
|
||||||
p := newProcessor(processorParams{
|
p := newProcessor(processorParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: rdbClient,
|
broker: rdbClient,
|
||||||
ss: ss,
|
|
||||||
retryDelayFunc: delayFunc,
|
retryDelayFunc: delayFunc,
|
||||||
|
isFailureFunc: defaultIsFailureFunc,
|
||||||
syncCh: nil,
|
syncCh: nil,
|
||||||
cancelations: base.NewCancelations(),
|
cancelations: base.NewCancelations(),
|
||||||
|
concurrency: 10,
|
||||||
|
queues: defaultQueueConfig,
|
||||||
|
strictPriority: false,
|
||||||
errHandler: ErrorHandlerFunc(errHandler),
|
errHandler: ErrorHandlerFunc(errHandler),
|
||||||
shutdownTimeout: defaultShutdownTimeout,
|
shutdownTimeout: defaultShutdownTimeout,
|
||||||
|
starting: starting,
|
||||||
|
finished: finished,
|
||||||
})
|
})
|
||||||
p.handler = tc.handler
|
p.handler = tc.handler
|
||||||
|
|
||||||
p.start(&sync.WaitGroup{})
|
p.start(&sync.WaitGroup{})
|
||||||
for _, msg := range tc.incoming {
|
runTime := time.Now() // time when processor is running
|
||||||
err := rdbClient.Enqueue(msg)
|
time.Sleep(tc.wait) // FIXME: This makes test flaky.
|
||||||
if err != nil {
|
p.shutdown()
|
||||||
p.terminate()
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
time.Sleep(tc.wait)
|
|
||||||
p.terminate()
|
|
||||||
|
|
||||||
cmpOpt := cmpopts.EquateApprox(0, float64(time.Second)) // allow up to a second difference in zset score
|
cmpOpt := h.EquateInt64Approx(int64(tc.wait.Seconds())) // allow up to tc.wait seconds of difference in zset scores
|
||||||
gotRetry := h.GetRetryEntries(t, r)
|
gotRetry := h.GetRetryEntries(t, r, base.DefaultQueueName)
|
||||||
if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
|
var wantRetry []base.Z // Note: construct wantRetry here since `LastFailedAt` and ZSCORE is relative to each test run.
|
||||||
t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.RetryQueue, diff)
|
for _, msg := range tc.wantRetry {
|
||||||
|
wantRetry = append(wantRetry,
|
||||||
|
base.Z{
|
||||||
|
Message: h.TaskMessageAfterRetry(*msg, tc.wantErrMsg, runTime),
|
||||||
|
Score: runTime.Add(tc.delay).Unix(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
|
||||||
|
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.RetryKey(base.DefaultQueueName), diff)
|
||||||
}
|
}
|
||||||
|
|
||||||
gotDead := h.GetDeadMessages(t, r)
|
gotArchived := h.GetArchivedEntries(t, r, base.DefaultQueueName)
|
||||||
if diff := cmp.Diff(tc.wantDead, gotDead, h.SortMsgOpt); diff != "" {
|
var wantArchived []base.Z // Note: construct wantArchived here since `LastFailedAt` and ZSCORE is relative to each test run.
|
||||||
t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.DeadQueue, diff)
|
for _, msg := range tc.wantArchived {
|
||||||
|
wantArchived = append(wantArchived,
|
||||||
|
base.Z{
|
||||||
|
Message: h.TaskMessageWithError(*msg, tc.wantErrMsg, runTime),
|
||||||
|
Score: runTime.Unix(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(wantArchived, gotArchived, h.SortZSetEntryOpt, cmpOpt); diff != "" {
|
||||||
|
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff)
|
||||||
}
|
}
|
||||||
|
|
||||||
if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
|
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
|
||||||
t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
|
t.Errorf("%s: %q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), tc.desc, l)
|
||||||
}
|
}
|
||||||
|
|
||||||
if n != tc.wantErrCount {
|
if n != tc.wantErrCount {
|
||||||
@@ -241,16 +481,25 @@ func TestProcessorQueues(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
ss := base.NewServerState("localhost", 1234, 10, tc.queueCfg, false)
|
starting := make(chan *workerInfo)
|
||||||
|
finished := make(chan *base.TaskMessage)
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer func() { close(done) }()
|
||||||
|
go fakeHeartbeater(starting, finished, done)
|
||||||
p := newProcessor(processorParams{
|
p := newProcessor(processorParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: nil,
|
broker: nil,
|
||||||
ss: ss,
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
retryDelayFunc: defaultDelayFunc,
|
isFailureFunc: defaultIsFailureFunc,
|
||||||
syncCh: nil,
|
syncCh: nil,
|
||||||
cancelations: base.NewCancelations(),
|
cancelations: base.NewCancelations(),
|
||||||
|
concurrency: 10,
|
||||||
|
queues: tc.queueCfg,
|
||||||
|
strictPriority: false,
|
||||||
errHandler: nil,
|
errHandler: nil,
|
||||||
shutdownTimeout: defaultShutdownTimeout,
|
shutdownTimeout: defaultShutdownTimeout,
|
||||||
|
starting: starting,
|
||||||
|
finished: finished,
|
||||||
})
|
})
|
||||||
got := p.queues()
|
got := p.queues()
|
||||||
if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
|
if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
|
||||||
@@ -261,36 +510,42 @@ func TestProcessorQueues(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestProcessorWithStrictPriority(t *testing.T) {
|
func TestProcessorWithStrictPriority(t *testing.T) {
|
||||||
r := setup(t)
|
var (
|
||||||
rdbClient := rdb.NewRDB(r)
|
r = setup(t)
|
||||||
|
|
||||||
m1 := h.NewTaskMessage("send_email", nil)
|
rdbClient = rdb.NewRDB(r)
|
||||||
m2 := h.NewTaskMessage("send_email", nil)
|
|
||||||
m3 := h.NewTaskMessage("send_email", nil)
|
|
||||||
m4 := h.NewTaskMessage("gen_thumbnail", nil)
|
|
||||||
m5 := h.NewTaskMessage("gen_thumbnail", nil)
|
|
||||||
m6 := h.NewTaskMessage("sync", nil)
|
|
||||||
m7 := h.NewTaskMessage("sync", nil)
|
|
||||||
|
|
||||||
t1 := NewTask(m1.Type, m1.Payload)
|
m1 = h.NewTaskMessageWithQueue("task1", nil, "critical")
|
||||||
t2 := NewTask(m2.Type, m2.Payload)
|
m2 = h.NewTaskMessageWithQueue("task2", nil, "critical")
|
||||||
t3 := NewTask(m3.Type, m3.Payload)
|
m3 = h.NewTaskMessageWithQueue("task3", nil, "critical")
|
||||||
t4 := NewTask(m4.Type, m4.Payload)
|
m4 = h.NewTaskMessageWithQueue("task4", nil, base.DefaultQueueName)
|
||||||
t5 := NewTask(m5.Type, m5.Payload)
|
m5 = h.NewTaskMessageWithQueue("task5", nil, base.DefaultQueueName)
|
||||||
t6 := NewTask(m6.Type, m6.Payload)
|
m6 = h.NewTaskMessageWithQueue("task6", nil, "low")
|
||||||
t7 := NewTask(m7.Type, m7.Payload)
|
m7 = h.NewTaskMessageWithQueue("task7", nil, "low")
|
||||||
|
|
||||||
|
t1 = NewTask(m1.Type, m1.Payload)
|
||||||
|
t2 = NewTask(m2.Type, m2.Payload)
|
||||||
|
t3 = NewTask(m3.Type, m3.Payload)
|
||||||
|
t4 = NewTask(m4.Type, m4.Payload)
|
||||||
|
t5 = NewTask(m5.Type, m5.Payload)
|
||||||
|
t6 = NewTask(m6.Type, m6.Payload)
|
||||||
|
t7 = NewTask(m7.Type, m7.Payload)
|
||||||
|
)
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
enqueued map[string][]*base.TaskMessage // initial queues state
|
pending map[string][]*base.TaskMessage // initial queues state
|
||||||
|
queues []string // list of queues to consume tasks from
|
||||||
wait time.Duration // wait duration between starting and stopping processor for this test case
|
wait time.Duration // wait duration between starting and stopping processor for this test case
|
||||||
wantProcessed []*Task // tasks to be processed at the end
|
wantProcessed []*Task // tasks to be processed at the end
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
enqueued: map[string][]*base.TaskMessage{
|
pending: map[string][]*base.TaskMessage{
|
||||||
base.DefaultQueueName: {m4, m5},
|
base.DefaultQueueName: {m4, m5},
|
||||||
"critical": {m1, m2, m3},
|
"critical": {m1, m2, m3},
|
||||||
"low": {m6, m7},
|
"low": {m6, m7},
|
||||||
},
|
},
|
||||||
|
queues: []string{base.DefaultQueueName, "critical", "low"},
|
||||||
wait: time.Second,
|
wait: time.Second,
|
||||||
wantProcessed: []*Task{t1, t2, t3, t4, t5, t6, t7},
|
wantProcessed: []*Task{t1, t2, t3, t4, t5, t6, t7},
|
||||||
},
|
},
|
||||||
@@ -298,8 +553,8 @@ func TestProcessorWithStrictPriority(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
h.FlushDB(t, r) // clean up db before each test case.
|
h.FlushDB(t, r) // clean up db before each test case.
|
||||||
for qname, msgs := range tc.enqueued {
|
for qname, msgs := range tc.pending {
|
||||||
h.SeedEnqueuedQueue(t, r, msgs, qname)
|
h.SeedPendingQueue(t, r, msgs, qname)
|
||||||
}
|
}
|
||||||
|
|
||||||
// instantiate a new processor
|
// instantiate a new processor
|
||||||
@@ -312,39 +567,52 @@ func TestProcessorWithStrictPriority(t *testing.T) {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
queueCfg := map[string]int{
|
queueCfg := map[string]int{
|
||||||
"critical": 3,
|
|
||||||
base.DefaultQueueName: 2,
|
base.DefaultQueueName: 2,
|
||||||
|
"critical": 3,
|
||||||
"low": 1,
|
"low": 1,
|
||||||
}
|
}
|
||||||
// Note: Set concurrency to 1 to make sure tasks are processed one at a time.
|
starting := make(chan *workerInfo)
|
||||||
ss := base.NewServerState("localhost", 1234, 1 /* concurrency */, queueCfg, true /*strict*/)
|
finished := make(chan *base.TaskMessage)
|
||||||
|
syncCh := make(chan *syncRequest)
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer func() { close(done) }()
|
||||||
|
go fakeHeartbeater(starting, finished, done)
|
||||||
|
go fakeSyncer(syncCh, done)
|
||||||
p := newProcessor(processorParams{
|
p := newProcessor(processorParams{
|
||||||
logger: testLogger,
|
logger: testLogger,
|
||||||
broker: rdbClient,
|
broker: rdbClient,
|
||||||
ss: ss,
|
retryDelayFunc: DefaultRetryDelayFunc,
|
||||||
retryDelayFunc: defaultDelayFunc,
|
isFailureFunc: defaultIsFailureFunc,
|
||||||
syncCh: nil,
|
syncCh: syncCh,
|
||||||
cancelations: base.NewCancelations(),
|
cancelations: base.NewCancelations(),
|
||||||
|
concurrency: 1, // Set concurrency to 1 to make sure tasks are processed one at a time.
|
||||||
|
queues: queueCfg,
|
||||||
|
strictPriority: true,
|
||||||
errHandler: nil,
|
errHandler: nil,
|
||||||
shutdownTimeout: defaultShutdownTimeout,
|
shutdownTimeout: defaultShutdownTimeout,
|
||||||
|
starting: starting,
|
||||||
|
finished: finished,
|
||||||
})
|
})
|
||||||
p.handler = HandlerFunc(handler)
|
p.handler = HandlerFunc(handler)
|
||||||
|
|
||||||
p.start(&sync.WaitGroup{})
|
p.start(&sync.WaitGroup{})
|
||||||
time.Sleep(tc.wait)
|
time.Sleep(tc.wait)
|
||||||
p.terminate()
|
// Make sure no tasks are stuck in active list.
|
||||||
|
for _, qname := range tc.queues {
|
||||||
|
if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
|
||||||
|
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.shutdown()
|
||||||
|
|
||||||
if diff := cmp.Diff(tc.wantProcessed, processed, cmp.AllowUnexported(Payload{})); diff != "" {
|
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
|
||||||
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
|
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
|
||||||
}
|
}
|
||||||
|
|
||||||
if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
|
|
||||||
t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPerform(t *testing.T) {
|
func TestProcessorPerform(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
desc string
|
desc string
|
||||||
handler HandlerFunc
|
handler HandlerFunc
|
||||||
@@ -356,7 +624,7 @@ func TestPerform(t *testing.T) {
|
|||||||
handler: func(ctx context.Context, t *Task) error {
|
handler: func(ctx context.Context, t *Task) error {
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
task: NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
|
task: NewTask("gen_thumbnail", h.JSON(map[string]interface{}{"src": "some/img/path"})),
|
||||||
wantErr: false,
|
wantErr: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -364,7 +632,7 @@ func TestPerform(t *testing.T) {
|
|||||||
handler: func(ctx context.Context, t *Task) error {
|
handler: func(ctx context.Context, t *Task) error {
|
||||||
return fmt.Errorf("something went wrong")
|
return fmt.Errorf("something went wrong")
|
||||||
},
|
},
|
||||||
task: NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
|
task: NewTask("gen_thumbnail", h.JSON(map[string]interface{}{"src": "some/img/path"})),
|
||||||
wantErr: true,
|
wantErr: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -372,13 +640,20 @@ func TestPerform(t *testing.T) {
|
|||||||
handler: func(ctx context.Context, t *Task) error {
|
handler: func(ctx context.Context, t *Task) error {
|
||||||
panic("something went terribly wrong")
|
panic("something went terribly wrong")
|
||||||
},
|
},
|
||||||
task: NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
|
task: NewTask("gen_thumbnail", h.JSON(map[string]interface{}{"src": "some/img/path"})),
|
||||||
wantErr: true,
|
wantErr: true,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
// Note: We don't need to fully initialize the processor since we are only testing
|
||||||
|
// perform method.
|
||||||
|
p := newProcessor(processorParams{
|
||||||
|
logger: testLogger,
|
||||||
|
queues: defaultQueueConfig,
|
||||||
|
})
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
got := perform(context.Background(), tc.task, tc.handler)
|
p.handler = tc.handler
|
||||||
|
got := p.perform(context.Background(), tc.task)
|
||||||
if !tc.wantErr && got != nil {
|
if !tc.wantErr && got != nil {
|
||||||
t.Errorf("%s: perform() = %v, want nil", tc.desc, got)
|
t.Errorf("%s: perform() = %v, want nil", tc.desc, got)
|
||||||
continue
|
continue
|
||||||
@@ -412,7 +687,7 @@ func TestGCD(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNormalizeQueueCfg(t *testing.T) {
|
func TestNormalizeQueues(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
input map[string]int
|
input map[string]int
|
||||||
want map[string]int
|
want map[string]int
|
||||||
@@ -462,9 +737,9 @@ func TestNormalizeQueueCfg(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
got := normalizeQueueCfg(tc.input)
|
got := normalizeQueues(tc.input)
|
||||||
if diff := cmp.Diff(tc.want, got); diff != "" {
|
if diff := cmp.Diff(tc.want, got); diff != "" {
|
||||||
t.Errorf("normalizeQueueCfg(%v) = %v, want %v; (-want, +got):\n%s",
|
t.Errorf("normalizeQueues(%v) = %v, want %v; (-want, +got):\n%s",
|
||||||
tc.input, got, tc.want, diff)
|
tc.input, got, tc.want, diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
recoverer.go (new file, 108 lines)
@@ -0,0 +1,108 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package asynq
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hibiken/asynq/internal/base"
|
||||||
|
"github.com/hibiken/asynq/internal/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
type recoverer struct {
|
||||||
|
logger *log.Logger
|
||||||
|
broker base.Broker
|
||||||
|
retryDelayFunc RetryDelayFunc
|
||||||
|
isFailureFunc func(error) bool
|
||||||
|
|
||||||
|
// channel to communicate back to the long running "recoverer" goroutine.
|
||||||
|
done chan struct{}
|
||||||
|
|
||||||
|
// list of queues to check for deadline.
|
||||||
|
queues []string
|
||||||
|
|
||||||
|
// poll interval.
|
||||||
|
interval time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
type recovererParams struct {
|
||||||
|
logger *log.Logger
|
||||||
|
broker base.Broker
|
||||||
|
queues []string
|
||||||
|
interval time.Duration
|
||||||
|
retryDelayFunc RetryDelayFunc
|
||||||
|
isFailureFunc func(error) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRecoverer(params recovererParams) *recoverer {
|
||||||
|
return &recoverer{
|
||||||
|
logger: params.logger,
|
||||||
|
broker: params.broker,
|
||||||
|
done: make(chan struct{}),
|
||||||
|
queues: params.queues,
|
||||||
|
interval: params.interval,
|
||||||
|
retryDelayFunc: params.retryDelayFunc,
|
||||||
|
isFailureFunc: params.isFailureFunc,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *recoverer) shutdown() {
|
||||||
|
r.logger.Debug("Recoverer shutting down...")
|
||||||
|
// Signal the recoverer goroutine to stop polling.
|
||||||
|
r.done <- struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *recoverer) start(wg *sync.WaitGroup) {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
r.recover()
|
||||||
|
timer := time.NewTimer(r.interval)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-r.done:
|
||||||
|
r.logger.Debug("Recoverer done")
|
||||||
|
timer.Stop()
|
||||||
|
return
|
||||||
|
case <-timer.C:
|
||||||
|
r.recover()
|
||||||
|
timer.Reset(r.interval)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *recoverer) recover() {
|
||||||
|
// Get all tasks whose deadline expired 30 seconds ago or earlier.
|
||||||
|
deadline := time.Now().Add(-30 * time.Second)
|
||||||
|
msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...)
|
||||||
|
if err != nil {
|
||||||
|
r.logger.Warn("recoverer: could not list deadline exceeded tasks")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, msg := range msgs {
|
||||||
|
if msg.Retried >= msg.Retry {
|
||||||
|
r.archive(msg, context.DeadlineExceeded)
|
||||||
|
} else {
|
||||||
|
r.retry(msg, context.DeadlineExceeded)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *recoverer) retry(msg *base.TaskMessage, err error) {
|
||||||
|
delay := r.retryDelayFunc(msg.Retried, err, NewTask(msg.Type, msg.Payload))
|
||||||
|
retryAt := time.Now().Add(delay)
|
||||||
|
if err := r.broker.Retry(msg, retryAt, err.Error(), r.isFailureFunc(err)); err != nil {
|
||||||
|
r.logger.Warnf("recoverer: could not retry deadline exceeded task: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *recoverer) archive(msg *base.TaskMessage, err error) {
|
||||||
|
if err := r.broker.Archive(msg, err.Error()); err != nil {
|
||||||
|
r.logger.Warnf("recoverer: could not move task to archive: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
recoverer_test.go (new file, 280 lines)
@@ -0,0 +1,280 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package asynq
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
h "github.com/hibiken/asynq/internal/asynqtest"
|
||||||
|
"github.com/hibiken/asynq/internal/base"
|
||||||
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRecoverer(t *testing.T) {
|
||||||
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
|
rdbClient := rdb.NewRDB(r)
|
||||||
|
|
||||||
|
t1 := h.NewTaskMessageWithQueue("task1", nil, "default")
|
||||||
|
t2 := h.NewTaskMessageWithQueue("task2", nil, "default")
|
||||||
|
t3 := h.NewTaskMessageWithQueue("task3", nil, "critical")
|
||||||
|
t4 := h.NewTaskMessageWithQueue("task4", nil, "default")
|
||||||
|
t4.Retried = t4.Retry // t4 has reached its max retry count
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
oneHourFromNow := now.Add(1 * time.Hour)
|
||||||
|
fiveMinutesFromNow := now.Add(5 * time.Minute)
|
||||||
|
fiveMinutesAgo := now.Add(-5 * time.Minute)
|
||||||
|
oneHourAgo := now.Add(-1 * time.Hour)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
desc string
|
||||||
|
inProgress map[string][]*base.TaskMessage
|
||||||
|
deadlines map[string][]base.Z
|
||||||
|
retry map[string][]base.Z
|
||||||
|
archived map[string][]base.Z
|
||||||
|
wantActive map[string][]*base.TaskMessage
|
||||||
|
wantDeadlines map[string][]base.Z
|
||||||
|
wantRetry map[string][]*base.TaskMessage
|
||||||
|
wantArchived map[string][]*base.TaskMessage
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
desc: "with one active task",
|
||||||
|
inProgress: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t1},
|
||||||
|
},
|
||||||
|
deadlines: map[string][]base.Z{
|
||||||
|
"default": {{Message: t1, Score: fiveMinutesAgo.Unix()}},
|
||||||
|
},
|
||||||
|
retry: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
wantActive: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
wantDeadlines: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t1},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "with a task with max-retry reached",
|
||||||
|
inProgress: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t4},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
deadlines: map[string][]base.Z{
|
||||||
|
"default": {{Message: t4, Score: fiveMinutesAgo.Unix()}},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
retry: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantActive: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantDeadlines: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t4},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "with multiple active tasks, and one expired",
|
||||||
|
inProgress: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t1, t2},
|
||||||
|
"critical": {t3},
|
||||||
|
},
|
||||||
|
deadlines: map[string][]base.Z{
|
||||||
|
"default": {
|
||||||
|
{Message: t1, Score: oneHourAgo.Unix()},
|
||||||
|
{Message: t2, Score: fiveMinutesFromNow.Unix()},
|
||||||
|
},
|
||||||
|
"critical": {
|
||||||
|
{Message: t3, Score: oneHourFromNow.Unix()},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
retry: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantActive: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t2},
|
||||||
|
"critical": {t3},
|
||||||
|
},
|
||||||
|
wantDeadlines: map[string][]base.Z{
|
||||||
|
"default": {{Message: t2, Score: fiveMinutesFromNow.Unix()}},
|
||||||
|
"critical": {{Message: t3, Score: oneHourFromNow.Unix()}},
|
||||||
|
},
|
||||||
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t1},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "with multiple expired active tasks",
|
||||||
|
inProgress: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t1, t2},
|
||||||
|
"critical": {t3},
|
||||||
|
},
|
||||||
|
deadlines: map[string][]base.Z{
|
||||||
|
"default": {
|
||||||
|
{Message: t1, Score: oneHourAgo.Unix()},
|
||||||
|
{Message: t2, Score: oneHourFromNow.Unix()},
|
||||||
|
},
|
||||||
|
"critical": {
|
||||||
|
{Message: t3, Score: fiveMinutesAgo.Unix()},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
retry: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"cricial": {},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"cricial": {},
|
||||||
|
},
|
||||||
|
wantActive: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t2},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantDeadlines: map[string][]base.Z{
|
||||||
|
"default": {{Message: t2, Score: oneHourFromNow.Unix()}},
|
||||||
|
},
|
||||||
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
|
"default": {t1},
|
||||||
|
"critical": {t3},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "with empty active queue",
|
||||||
|
inProgress: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
deadlines: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
retry: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantActive: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantDeadlines: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"critical": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
h.FlushDB(t, r)
|
||||||
|
h.SeedAllActiveQueues(t, r, tc.inProgress)
|
||||||
|
h.SeedAllDeadlines(t, r, tc.deadlines)
|
||||||
|
h.SeedAllRetryQueues(t, r, tc.retry)
|
||||||
|
h.SeedAllArchivedQueues(t, r, tc.archived)
|
||||||
|
|
||||||
|
recoverer := newRecoverer(recovererParams{
|
||||||
|
logger: testLogger,
|
||||||
|
broker: rdbClient,
|
||||||
|
queues: []string{"default", "critical"},
|
||||||
|
interval: 1 * time.Second,
|
||||||
|
retryDelayFunc: func(n int, err error, task *Task) time.Duration { return 30 * time.Second },
|
||||||
|
isFailureFunc: defaultIsFailureFunc,
|
||||||
|
})
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
recoverer.start(&wg)
|
||||||
|
runTime := time.Now() // time when recoverer is running
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
recoverer.shutdown()
|
||||||
|
|
||||||
|
for qname, want := range tc.wantActive {
|
||||||
|
gotActive := h.GetActiveMessages(t, r, qname)
|
||||||
|
if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
|
||||||
|
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for qname, want := range tc.wantDeadlines {
|
||||||
|
gotDeadlines := h.GetDeadlinesEntries(t, r, qname)
|
||||||
|
if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
|
||||||
|
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.DeadlinesKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cmpOpt := h.EquateInt64Approx(2) // allow up to two-second difference in `LastFailedAt`
|
||||||
|
for qname, msgs := range tc.wantRetry {
|
||||||
|
gotRetry := h.GetRetryMessages(t, r, qname)
|
||||||
|
var wantRetry []*base.TaskMessage // Note: construct message here since `LastFailedAt` is relative to each test run
|
||||||
|
for _, msg := range msgs {
|
||||||
|
wantRetry = append(wantRetry, h.TaskMessageAfterRetry(*msg, "context deadline exceeded", runTime))
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(wantRetry, gotRetry, h.SortMsgOpt, cmpOpt); diff != "" {
|
||||||
|
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for qname, msgs := range tc.wantArchived {
|
||||||
|
gotArchived := h.GetArchivedMessages(t, r, qname)
|
||||||
|
var wantArchived []*base.TaskMessage
|
||||||
|
for _, msg := range msgs {
|
||||||
|
wantArchived = append(wantArchived, h.TaskMessageWithError(*msg, "context deadline exceeded", runTime))
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(wantArchived, gotArchived, h.SortMsgOpt, cmpOpt); diff != "" {
|
||||||
|
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
scheduler.go (279 lines)
@@ -5,73 +5,274 @@
|
|||||||
package asynq
|
package asynq
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-redis/redis/v7"
|
||||||
|
"github.com/google/uuid"
|
||||||
"github.com/hibiken/asynq/internal/base"
|
"github.com/hibiken/asynq/internal/base"
|
||||||
"github.com/hibiken/asynq/internal/log"
|
"github.com/hibiken/asynq/internal/log"
|
||||||
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
|
"github.com/robfig/cron/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
type scheduler struct {
|
// A Scheduler kicks off tasks at regular intervals based on the user-defined schedule.
|
||||||
|
//
|
||||||
|
// Schedulers are safe for concurrent use by multiple goroutines.
|
||||||
|
type Scheduler struct {
|
||||||
|
id string
|
||||||
|
state *base.ServerState
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
broker base.Broker
|
client *Client
|
||||||
|
rdb *rdb.RDB
|
||||||
// channel to communicate back to the long running "scheduler" goroutine.
|
cron *cron.Cron
|
||||||
|
location *time.Location
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
|
wg sync.WaitGroup
|
||||||
|
errHandler func(task *Task, opts []Option, err error)
|
||||||
|
|
||||||
// poll interval on average
|
// guards idmap
|
||||||
avgInterval time.Duration
|
mu sync.Mutex
|
||||||
|
// idmap maps Scheduler's entry ID to cron.EntryID
|
||||||
// list of queues to move the tasks into.
|
// to avoid using cron.EntryID as the public API of
|
||||||
qnames []string
|
// the Scheduler.
|
||||||
|
idmap map[string]cron.EntryID
|
||||||
}
|
}
|
||||||
|
|
||||||
type schedulerParams struct {
|
// NewScheduler returns a new Scheduler instance given the redis connection option.
|
||||||
logger *log.Logger
|
// The parameter opts is optional; defaults will be used if opts is set to nil.
|
||||||
broker base.Broker
|
func NewScheduler(r RedisConnOpt, opts *SchedulerOpts) *Scheduler {
|
||||||
interval time.Duration
|
c, ok := r.MakeRedisClient().(redis.UniversalClient)
|
||||||
queues map[string]int
|
if !ok {
|
||||||
|
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
|
||||||
|
}
|
||||||
|
if opts == nil {
|
||||||
|
opts = &SchedulerOpts{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newScheduler(params schedulerParams) *scheduler {
|
logger := log.NewLogger(opts.Logger)
|
||||||
var qnames []string
|
loglevel := opts.LogLevel
|
||||||
for q := range params.queues {
|
if loglevel == level_unspecified {
|
||||||
qnames = append(qnames, q)
|
loglevel = InfoLevel
|
||||||
}
|
}
|
||||||
return &scheduler{
|
logger.SetLevel(toInternalLogLevel(loglevel))
|
||||||
logger: params.logger,
|
|
||||||
broker: params.broker,
|
loc := opts.Location
|
||||||
|
if loc == nil {
|
||||||
|
loc = time.UTC
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Scheduler{
|
||||||
|
id: generateSchedulerID(),
|
||||||
|
state: base.NewServerState(),
|
||||||
|
logger: logger,
|
||||||
|
client: NewClient(r),
|
||||||
|
rdb: rdb.NewRDB(c),
|
||||||
|
cron: cron.New(cron.WithLocation(loc)),
|
||||||
|
location: loc,
|
||||||
done: make(chan struct{}),
|
done: make(chan struct{}),
|
||||||
avgInterval: params.interval,
|
errHandler: opts.EnqueueErrorHandler,
|
||||||
qnames: qnames,
|
idmap: make(map[string]cron.EntryID),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *scheduler) terminate() {
|
func generateSchedulerID() string {
|
||||||
s.logger.Debug("Scheduler shutting down...")
|
host, err := os.Hostname()
|
||||||
// Signal the scheduler goroutine to stop polling.
|
if err != nil {
|
||||||
s.done <- struct{}{}
|
host = "unknown-host"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s:%d:%v", host, os.Getpid(), uuid.New())
|
||||||
}
|
}
|
||||||
|
|
||||||
// start starts the "scheduler" goroutine.
|
// SchedulerOpts specifies scheduler options.
|
||||||
func (s *scheduler) start(wg *sync.WaitGroup) {
|
type SchedulerOpts struct {
|
||||||
wg.Add(1)
|
// Logger specifies the logger used by the scheduler instance.
|
||||||
go func() {
|
//
|
||||||
defer wg.Done()
|
// If unset, the default logger is used.
|
||||||
|
Logger Logger
|
||||||
|
|
||||||
|
// LogLevel specifies the minimum log level to enable.
|
||||||
|
//
|
||||||
|
// If unset, InfoLevel is used by default.
|
||||||
|
LogLevel LogLevel
|
||||||
|
|
||||||
|
// Location specifies the time zone location.
|
||||||
|
//
|
||||||
|
// If unset, the UTC time zone (time.UTC) is used.
|
||||||
|
Location *time.Location
|
||||||
|
|
||||||
|
// EnqueueErrorHandler gets called when scheduler cannot enqueue a registered task
|
||||||
|
// due to an error.
|
||||||
|
EnqueueErrorHandler func(task *Task, opts []Option, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// enqueueJob encapsulates the job of enqueueing a task and recording the event.
|
||||||
|
type enqueueJob struct {
|
||||||
|
id uuid.UUID
|
||||||
|
cronspec string
|
||||||
|
task *Task
|
||||||
|
opts []Option
|
||||||
|
location *time.Location
|
||||||
|
logger *log.Logger
|
||||||
|
client *Client
|
||||||
|
rdb *rdb.RDB
|
||||||
|
errHandler func(task *Task, opts []Option, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *enqueueJob) Run() {
|
||||||
|
info, err := j.client.Enqueue(j.task, j.opts...)
|
||||||
|
if err != nil {
|
||||||
|
j.logger.Errorf("scheduler could not enqueue a task %+v: %v", j.task, err)
|
||||||
|
if j.errHandler != nil {
|
||||||
|
j.errHandler(j.task, j.opts, err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
j.logger.Debugf("scheduler enqueued a task: %+v", info)
|
||||||
|
event := &base.SchedulerEnqueueEvent{
|
||||||
|
TaskID: info.ID,
|
||||||
|
EnqueuedAt: time.Now().In(j.location),
|
||||||
|
}
|
||||||
|
err = j.rdb.RecordSchedulerEnqueueEvent(j.id.String(), event)
|
||||||
|
if err != nil {
|
||||||
|
j.logger.Errorf("scheduler could not record enqueue event of enqueued task %+v: %v", j.task, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register registers a task to be enqueued on the given schedule specified by the cronspec.
|
||||||
|
// It returns an ID of the newly registered entry.
|
||||||
|
func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entryID string, err error) {
|
||||||
|
job := &enqueueJob{
|
||||||
|
id: uuid.New(),
|
||||||
|
cronspec: cronspec,
|
||||||
|
task: task,
|
||||||
|
opts: opts,
|
||||||
|
location: s.location,
|
||||||
|
client: s.client,
|
||||||
|
rdb: s.rdb,
|
||||||
|
logger: s.logger,
|
||||||
|
errHandler: s.errHandler,
|
||||||
|
}
|
||||||
|
cronID, err := s.cron.AddJob(cronspec, job)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
s.idmap[job.id.String()] = cronID
|
||||||
|
s.mu.Unlock()
|
||||||
|
return job.id.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unregister removes a registered entry by entry ID.
|
||||||
|
// Unregister returns a non-nil error if no entries were found for the given entryID.
|
||||||
|
func (s *Scheduler) Unregister(entryID string) error {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
cronID, ok := s.idmap[entryID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("asynq: no scheduler entry found")
|
||||||
|
}
|
||||||
|
delete(s.idmap, entryID)
|
||||||
|
s.cron.Remove(cronID)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run starts the scheduler until an os signal to exit the program is received.
|
||||||
|
// It returns an error if the scheduler is already running or has been shut down.
|
||||||
|
func (s *Scheduler) Run() error {
|
||||||
|
if err := s.Start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.waitForSignals()
|
||||||
|
s.Shutdown()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start starts the scheduler.
|
||||||
|
// It returns an error if the scheduler is already running or has been shut down.
|
||||||
|
func (s *Scheduler) Start() error {
|
||||||
|
switch s.state.Get() {
|
||||||
|
case base.StateActive:
|
||||||
|
return fmt.Errorf("asynq: the scheduler is already running")
|
||||||
|
case base.StateClosed:
|
||||||
|
return fmt.Errorf("asynq: the scheduler has already been stopped")
|
||||||
|
}
|
||||||
|
s.logger.Info("Scheduler starting")
|
||||||
|
s.logger.Infof("Scheduler timezone is set to %v", s.location)
|
||||||
|
s.cron.Start()
|
||||||
|
s.wg.Add(1)
|
||||||
|
go s.runHeartbeater()
|
||||||
|
s.state.Set(base.StateActive)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown stops and shuts down the scheduler.
|
||||||
|
func (s *Scheduler) Shutdown() {
|
||||||
|
s.logger.Info("Scheduler shutting down")
|
||||||
|
close(s.done) // signal heartbeater to stop
|
||||||
|
ctx := s.cron.Stop()
|
||||||
|
<-ctx.Done()
|
||||||
|
s.wg.Wait()
|
||||||
|
|
||||||
|
s.clearHistory()
|
||||||
|
s.client.Close()
|
||||||
|
s.rdb.Close()
|
||||||
|
s.state.Set(base.StateClosed)
|
||||||
|
s.logger.Info("Scheduler stopped")
|
||||||
|
}
|
||||||
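Taken together, Register, Unregister, Run, Start, and Shutdown above make up the public surface of the cron-based Scheduler. A minimal usage sketch, assuming the import path github.com/hibiken/asynq, a Redis server on localhost:6379, and an illustrative task type name:

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		&asynq.SchedulerOpts{
			Location: time.UTC,
			// Called whenever a registered task cannot be enqueued.
			EnqueueErrorHandler: func(task *asynq.Task, opts []asynq.Option, err error) {
				log.Printf("could not enqueue %q: %v", task.Type(), err)
			},
		},
	)

	// Register a task to be enqueued every 30 seconds. The returned entry ID
	// can later be passed to Unregister to remove the entry.
	entryID, err := scheduler.Register("@every 30s", asynq.NewTask("report:generate", nil), asynq.MaxRetry(3))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("registered entry: %s", entryID)

	// Run blocks until a TERM or INT signal is received, then shuts down.
	if err := scheduler.Run(); err != nil {
		log.Fatal(err)
	}
}
```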
|
|
||||||
|
func (s *Scheduler) runHeartbeater() {
|
||||||
|
defer s.wg.Done()
|
||||||
|
ticker := time.NewTicker(5 * time.Second)
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-s.done:
|
case <-s.done:
|
||||||
s.logger.Debug("Scheduler done")
|
s.logger.Debugf("Scheduler heatbeater shutting down")
|
||||||
|
s.rdb.ClearSchedulerEntries(s.id)
|
||||||
return
|
return
|
||||||
case <-time.After(s.avgInterval):
|
case <-ticker.C:
|
||||||
s.exec()
|
s.beat()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *scheduler) exec() {
|
// beat writes a snapshot of entries to redis.
|
||||||
if err := s.broker.CheckAndEnqueue(s.qnames...); err != nil {
|
func (s *Scheduler) beat() {
|
||||||
s.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
|
var entries []*base.SchedulerEntry
|
||||||
|
for _, entry := range s.cron.Entries() {
|
||||||
|
job := entry.Job.(*enqueueJob)
|
||||||
|
e := &base.SchedulerEntry{
|
||||||
|
ID: job.id.String(),
|
||||||
|
Spec: job.cronspec,
|
||||||
|
Type: job.task.Type(),
|
||||||
|
Payload: job.task.Payload(),
|
||||||
|
Opts: stringifyOptions(job.opts),
|
||||||
|
Next: entry.Next,
|
||||||
|
Prev: entry.Prev,
|
||||||
|
}
|
||||||
|
entries = append(entries, e)
|
||||||
|
}
|
||||||
|
s.logger.Debugf("Writing entries %v", entries)
|
||||||
|
if err := s.rdb.WriteSchedulerEntries(s.id, entries, 5*time.Second); err != nil {
|
||||||
|
s.logger.Warnf("Scheduler could not write heartbeat data: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringifyOptions(opts []Option) []string {
|
||||||
|
var res []string
|
||||||
|
for _, opt := range opts {
|
||||||
|
res = append(res, opt.String())
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Scheduler) clearHistory() {
|
||||||
|
for _, entry := range s.cron.Entries() {
|
||||||
|
job := entry.Job.(*enqueueJob)
|
||||||
|
if err := s.rdb.ClearSchedulerHistory(job.id.String()); err != nil {
|
||||||
|
s.logger.Warnf("Could not clear scheduler history for entry %q: %v", job.id.String(), err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,89 +10,147 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
h "github.com/hibiken/asynq/internal/asynqtest"
|
"github.com/hibiken/asynq/internal/asynqtest"
|
||||||
"github.com/hibiken/asynq/internal/base"
|
"github.com/hibiken/asynq/internal/base"
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestScheduler(t *testing.T) {
|
func TestSchedulerRegister(t *testing.T) {
|
||||||
r := setup(t)
|
|
||||||
rdbClient := rdb.NewRDB(r)
|
|
||||||
const pollInterval = time.Second
|
|
||||||
s := newScheduler(schedulerParams{
|
|
||||||
logger: testLogger,
|
|
||||||
broker: rdbClient,
|
|
||||||
interval: pollInterval,
|
|
||||||
queues: defaultQueueConfig,
|
|
||||||
})
|
|
||||||
t1 := h.NewTaskMessage("gen_thumbnail", nil)
|
|
||||||
t2 := h.NewTaskMessage("send_email", nil)
|
|
||||||
t3 := h.NewTaskMessage("reindex", nil)
|
|
||||||
t4 := h.NewTaskMessage("sync", nil)
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
initScheduled []h.ZSetEntry // scheduled queue initial state
|
cronspec string
|
||||||
initRetry []h.ZSetEntry // retry queue initial state
|
task *Task
|
||||||
initQueue []*base.TaskMessage // default queue initial state
|
opts []Option
|
||||||
wait time.Duration // wait duration before checking for final state
|
wait time.Duration
|
||||||
wantScheduled []*base.TaskMessage // schedule queue final state
|
queue string
|
||||||
wantRetry []*base.TaskMessage // retry queue final state
|
want []*base.TaskMessage
|
||||||
wantQueue []*base.TaskMessage // default queue final state
|
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
initScheduled: []h.ZSetEntry{
|
cronspec: "@every 3s",
|
||||||
{Msg: t1, Score: float64(now.Add(time.Hour).Unix())},
|
task: NewTask("task1", nil),
|
||||||
{Msg: t2, Score: float64(now.Add(-2 * time.Second).Unix())},
|
opts: []Option{MaxRetry(10)},
|
||||||
},
|
wait: 10 * time.Second,
|
||||||
initRetry: []h.ZSetEntry{
|
queue: "default",
|
||||||
{Msg: t3, Score: float64(time.Now().Add(-500 * time.Millisecond).Unix())},
|
want: []*base.TaskMessage{
|
||||||
},
|
{
|
||||||
initQueue: []*base.TaskMessage{t4},
|
Type: "task1",
|
||||||
wait: pollInterval * 2,
|
Payload: nil,
|
||||||
wantScheduled: []*base.TaskMessage{t1},
|
Retry: 10,
|
||||||
wantRetry: []*base.TaskMessage{},
|
Timeout: int64(defaultTimeout.Seconds()),
|
||||||
wantQueue: []*base.TaskMessage{t2, t3, t4},
|
Queue: "default",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
initScheduled: []h.ZSetEntry{
|
Type: "task1",
|
||||||
{Msg: t1, Score: float64(now.Unix())},
|
Payload: nil,
|
||||||
{Msg: t2, Score: float64(now.Add(-2 * time.Second).Unix())},
|
Retry: 10,
|
||||||
{Msg: t3, Score: float64(now.Add(-500 * time.Millisecond).Unix())},
|
Timeout: int64(defaultTimeout.Seconds()),
|
||||||
|
Queue: "default",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Type: "task1",
|
||||||
|
Payload: nil,
|
||||||
|
Retry: 10,
|
||||||
|
Timeout: int64(defaultTimeout.Seconds()),
|
||||||
|
Queue: "default",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
initRetry: []h.ZSetEntry{},
|
|
||||||
initQueue: []*base.TaskMessage{t4},
|
|
||||||
wait: pollInterval * 2,
|
|
||||||
wantScheduled: []*base.TaskMessage{},
|
|
||||||
wantRetry: []*base.TaskMessage{},
|
|
||||||
wantQueue: []*base.TaskMessage{t1, t2, t3, t4},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
r := setup(t)
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
h.FlushDB(t, r) // clean up db before each test case.
|
scheduler := NewScheduler(getRedisConnOpt(t), nil)
|
||||||
h.SeedScheduledQueue(t, r, tc.initScheduled) // initialize scheduled queue
|
if _, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...); err != nil {
|
||||||
h.SeedRetryQueue(t, r, tc.initRetry) // initialize retry queue
|
t.Fatal(err)
|
||||||
h.SeedEnqueuedQueue(t, r, tc.initQueue) // initialize default queue
|
}
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
if err := scheduler.Start(); err != nil {
|
||||||
s.start(&wg)
|
t.Fatal(err)
|
||||||
|
}
|
||||||
time.Sleep(tc.wait)
|
time.Sleep(tc.wait)
|
||||||
s.terminate()
|
scheduler.Shutdown()
|
||||||
|
|
||||||
gotScheduled := h.GetScheduledMessages(t, r)
|
got := asynqtest.GetPendingMessages(t, r, tc.queue)
|
||||||
if diff := cmp.Diff(tc.wantScheduled, gotScheduled, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(tc.want, got, asynqtest.IgnoreIDOpt); diff != "" {
|
||||||
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.ScheduledQueue, diff)
|
t.Errorf("mismatch found in queue %q: (-want,+got)\n%s", tc.queue, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
gotRetry := h.GetRetryMessages(t, r)
|
func TestSchedulerWhenRedisDown(t *testing.T) {
|
||||||
if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortMsgOpt); diff != "" {
|
var (
|
||||||
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.RetryQueue, diff)
|
mu sync.Mutex
|
||||||
|
counter int
|
||||||
|
)
|
||||||
|
errorHandler := func(task *Task, opts []Option, err error) {
|
||||||
|
mu.Lock()
|
||||||
|
counter++
|
||||||
|
mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
gotEnqueued := h.GetEnqueuedMessages(t, r)
|
// Connect to non-existent redis instance to simulate a redis server being down.
|
||||||
if diff := cmp.Diff(tc.wantQueue, gotEnqueued, h.SortMsgOpt); diff != "" {
|
scheduler := NewScheduler(
|
||||||
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.DefaultQueue, diff)
|
RedisClientOpt{Addr: ":9876"},
|
||||||
|
&SchedulerOpts{EnqueueErrorHandler: errorHandler},
|
||||||
|
)
|
||||||
|
|
||||||
|
task := NewTask("test", nil)
|
||||||
|
|
||||||
|
if _, err := scheduler.Register("@every 3s", task); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scheduler.Start(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
// Scheduler should attempt to enqueue the task three times (every 3s).
|
||||||
|
time.Sleep(10 * time.Second)
|
||||||
|
scheduler.Shutdown()
|
||||||
|
|
||||||
|
mu.Lock()
|
||||||
|
if counter != 3 {
|
||||||
|
t.Errorf("EnqueueErrorHandler was called %d times, want 3", counter)
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSchedulerUnregister(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
cronspec string
|
||||||
|
task *Task
|
||||||
|
opts []Option
|
||||||
|
wait time.Duration
|
||||||
|
queue string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
cronspec: "@every 3s",
|
||||||
|
task: NewTask("task1", nil),
|
||||||
|
opts: []Option{MaxRetry(10)},
|
||||||
|
wait: 10 * time.Second,
|
||||||
|
queue: "default",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
r := setup(t)
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
scheduler := NewScheduler(getRedisConnOpt(t), nil)
|
||||||
|
entryID, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := scheduler.Unregister(entryID); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scheduler.Start(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
time.Sleep(tc.wait)
|
||||||
|
scheduler.Shutdown()
|
||||||
|
|
||||||
|
got := asynqtest.GetPendingMessages(t, r, tc.queue)
|
||||||
|
if len(got) != 0 {
|
||||||
|
t.Errorf("%d tasks were enqueued, want zero", len(got))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -62,7 +62,7 @@ func (mux *ServeMux) Handler(t *Task) (h Handler, pattern string) {
|
|||||||
mux.mu.RLock()
|
mux.mu.RLock()
|
||||||
defer mux.mu.RUnlock()
|
defer mux.mu.RUnlock()
|
||||||
|
|
||||||
h, pattern = mux.match(t.Type)
|
h, pattern = mux.match(t.Type())
|
||||||
if h == nil {
|
if h == nil {
|
||||||
h, pattern = NotFoundHandler(), ""
|
h, pattern = NotFoundHandler(), ""
|
||||||
}
|
}
|
||||||
@@ -98,7 +98,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
|
|||||||
mux.mu.Lock()
|
mux.mu.Lock()
|
||||||
defer mux.mu.Unlock()
|
defer mux.mu.Unlock()
|
||||||
|
|
||||||
if pattern == "" {
|
if strings.TrimSpace(pattern) == "" {
|
||||||
panic("asynq: invalid pattern")
|
panic("asynq: invalid pattern")
|
||||||
}
|
}
|
||||||
if handler == nil {
|
if handler == nil {
|
||||||
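For reference, the TrimSpace check above rejects empty or whitespace-only patterns. A small sketch of registering handlers on a mux follows; the task type name and handler body are illustrative, and HandleFunc is assumed to be the function-literal convenience around Handle:

```go
package tasks

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func NewMux() *asynq.ServeMux {
	mux := asynq.NewServeMux()
	// Handlers are looked up by the task's type name.
	mux.HandleFunc("email:welcome", func(ctx context.Context, t *asynq.Task) error {
		log.Printf("sending welcome email, payload=%s", t.Payload())
		return nil // nil means the task was processed successfully
	})
	// An empty or whitespace-only pattern panics, per the check above.
	return mux
}
```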
@@ -151,7 +151,7 @@ func (mux *ServeMux) Use(mws ...MiddlewareFunc) {
|
|||||||
|
|
||||||
// NotFound returns an error indicating that the handler was not found for the given task.
|
// NotFound returns an error indicating that the handler was not found for the given task.
|
||||||
func NotFound(ctx context.Context, task *Task) error {
|
func NotFound(ctx context.Context, task *Task) error {
|
||||||
return fmt.Errorf("handler not found for task %q", task.Type)
|
return fmt.Errorf("handler not found for task %q", task.Type())
|
||||||
}
|
}
|
||||||
|
|
||||||
// NotFoundHandler returns a simple task handler that returns a ``not found`` error.
|
// NotFoundHandler returns a simple task handler that returns a ``not found`` error.
|
||||||
|
|||||||
@@ -68,7 +68,7 @@ func TestServeMux(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if called != tc.want {
|
if called != tc.want {
|
||||||
t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type, tc.want)
|
t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type(), tc.want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -124,7 +124,7 @@ func TestServeMuxNotFound(t *testing.T) {
|
|||||||
task := NewTask(tc.typename, nil)
|
task := NewTask(tc.typename, nil)
|
||||||
err := mux.ProcessTask(context.Background(), task)
|
err := mux.ProcessTask(context.Background(), task)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Errorf("ProcessTask did not return error for task %q, should return 'not found' error", task.Type)
|
t.Errorf("ProcessTask did not return error for task %q, should return 'not found' error", task.Type())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -164,7 +164,7 @@ func TestServeMuxMiddlewares(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if called != tc.want {
|
if called != tc.want {
|
||||||
t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type, tc.want)
|
t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type(), tc.want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
server.go
@@ -10,43 +10,45 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-redis/redis/v7"
|
||||||
"github.com/hibiken/asynq/internal/base"
|
"github.com/hibiken/asynq/internal/base"
|
||||||
"github.com/hibiken/asynq/internal/log"
|
"github.com/hibiken/asynq/internal/log"
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Server is responsible for managing the background-task processing.
|
// Server is responsible for task processing and task lifecycle management.
|
||||||
//
|
//
|
||||||
// Server pulls tasks off queues and processes them.
|
// Server pulls tasks off queues and processes them.
|
||||||
// If the processing of a task is unsuccessful, server will
|
// If the processing of a task is unsuccessful, server will schedule it for a retry.
|
||||||
// schedule it for a retry.
|
//
|
||||||
// A task will be retried until either the task gets processed successfully
|
// A task will be retried until either the task gets processed successfully
|
||||||
// or until it reaches its max retry count.
|
// or until it reaches its max retry count.
|
||||||
//
|
//
|
||||||
// If a task exhausts its retries, it will be moved to the "dead" queue and
|
// If a task exhausts its retries, it will be moved to the archive and
|
||||||
// will be kept in the queue for some time until a certain condition is met
|
// will be kept in the archive set.
|
||||||
// (e.g., queue size reaches a certain limit, or the task has been in the
|
// Note that the archive size is finite and once it reaches its max size,
|
||||||
// queue for a certain amount of time).
|
// the oldest tasks in the archive will be deleted.
|
||||||
type Server struct {
|
type Server struct {
|
||||||
ss *base.ServerState
|
|
||||||
|
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
|
|
||||||
broker base.Broker
|
broker base.Broker
|
||||||
|
|
||||||
|
state *base.ServerState
|
||||||
|
|
||||||
// wait group to wait for all goroutines to finish.
|
// wait group to wait for all goroutines to finish.
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
scheduler *scheduler
|
forwarder *forwarder
|
||||||
processor *processor
|
processor *processor
|
||||||
syncer *syncer
|
syncer *syncer
|
||||||
heartbeater *heartbeater
|
heartbeater *heartbeater
|
||||||
subscriber *subscriber
|
subscriber *subscriber
|
||||||
|
recoverer *recoverer
|
||||||
|
healthchecker *healthchecker
|
||||||
}
|
}
|
||||||
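To make the lifecycle described above concrete (retry on failure, archive once retries are exhausted), a minimal worker program might look like the sketch below; the queue weights echo the Config example further down and the handler is a placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		asynq.Config{
			Concurrency: 10,
			Queues:      map[string]int{"critical": 6, "default": 3, "low": 1},
		},
	)

	mux := asynq.NewServeMux()
	mux.HandleFunc("image:resize", func(ctx context.Context, t *asynq.Task) error {
		// Returning a non-nil error schedules a retry; once retries are
		// exhausted the task is moved to the archive.
		return nil
	})

	// Run blocks until a TERM or INT signal is received, then shuts down gracefully.
	if err := srv.Run(mux); err != nil {
		log.Fatal(err)
	}
}
```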
|
|
||||||
// Config specifies the server's background-task processing behavior.
|
// Config specifies the server's background-task processing behavior.
|
||||||
@@ -60,11 +62,15 @@ type Config struct {
|
|||||||
// Function to calculate retry delay for a failed task.
|
// Function to calculate retry delay for a failed task.
|
||||||
//
|
//
|
||||||
// By default, it uses exponential backoff algorithm to calculate the delay.
|
// By default, it uses exponential backoff algorithm to calculate the delay.
|
||||||
|
RetryDelayFunc RetryDelayFunc
|
||||||
|
|
||||||
|
// Predicate function to determine whether the error returned from Handler is a failure.
|
||||||
|
// If the function returns false, Server will not increment the retried counter for the task,
|
||||||
|
// and Server won't record the queue stats (processed and failed stats) to avoid skewing the error
|
||||||
|
// rate of the queue.
|
||||||
//
|
//
|
||||||
// n is the number of times the task has been retried.
|
// By default, if the given error is non-nil the function returns true.
|
||||||
// e is the error returned by the task handler.
|
IsFailure func(error) bool
|
||||||
// t is the task in question.
|
|
||||||
RetryDelayFunc func(n int, e error, t *Task) time.Duration
|
|
||||||
|
|
||||||
// List of queues to process with given priority value. Keys are the names of the
|
// List of queues to process with given priority value. Keys are the names of the
|
||||||
// queues and values are associated priority value.
|
// queues and values are associated priority value.
|
||||||
@@ -74,11 +80,13 @@ type Config struct {
|
|||||||
// Priority is treated as follows to avoid starving low priority queues.
|
// Priority is treated as follows to avoid starving low priority queues.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
|
//
|
||||||
// Queues: map[string]int{
|
// Queues: map[string]int{
|
||||||
// "critical": 6,
|
// "critical": 6,
|
||||||
// "default": 3,
|
// "default": 3,
|
||||||
// "low": 1,
|
// "low": 1,
|
||||||
// }
|
// }
|
||||||
|
//
|
||||||
// With the above config and given that all queues are not empty, the tasks
|
// With the above config and given that all queues are not empty, the tasks
|
||||||
// in "critical", "default", "low" should be processed 60%, 30%, 10% of
|
// in "critical", "default", "low" should be processed 60%, 30%, 10% of
|
||||||
// the time respectively.
|
// the time respectively.
|
||||||
@@ -98,7 +106,10 @@ type Config struct {
|
|||||||
// HandleError is invoked only if the task handler returns a non-nil error.
|
// HandleError is invoked only if the task handler returns a non-nil error.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// func reportError(task *asynq.Task, err error, retried, maxRetry int) {
|
//
|
||||||
|
// func reportError(ctx context.Context, task *asynq.Task, err error) {
|
||||||
|
// retried, _ := asynq.GetRetryCount(ctx)
|
||||||
|
// maxRetry, _ := asynq.GetMaxRetry(ctx)
|
||||||
// if retried >= maxRetry {
|
// if retried >= maxRetry {
|
||||||
// err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
|
// err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
|
||||||
// }
|
// }
|
||||||
@@ -123,22 +134,39 @@ type Config struct {
|
|||||||
//
|
//
|
||||||
// If unset or zero, default timeout of 8 seconds is used.
|
// If unset or zero, default timeout of 8 seconds is used.
|
||||||
ShutdownTimeout time.Duration
|
ShutdownTimeout time.Duration
|
||||||
|
|
||||||
|
// HealthCheckFunc is called periodically with any errors encountered during ping to the
|
||||||
|
// connected redis server.
|
||||||
|
HealthCheckFunc func(error)
|
||||||
|
|
||||||
|
// HealthCheckInterval specifies the interval between healthchecks.
|
||||||
|
//
|
||||||
|
// If unset or zero, the interval is set to 15 seconds.
|
||||||
|
HealthCheckInterval time.Duration
|
||||||
}
|
}
|
||||||
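A hedged sketch of a Config that exercises the fields introduced in this change (IsFailure, HealthCheckFunc, HealthCheckInterval); the rate-limit sentinel error is hypothetical:

```go
package worker

import (
	"errors"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// errRateLimited is a hypothetical sentinel returned by handlers when a
// downstream API throttles us.
var errRateLimited = errors.New("rate limited")

var cfg = asynq.Config{
	Concurrency: 20,
	Queues:      map[string]int{"critical": 6, "default": 3, "low": 1},
	// Returning false keeps rate-limit errors out of the retried counter and
	// the processed/failed stats, so they do not skew the queue's error rate.
	IsFailure: func(err error) bool {
		return !errors.Is(err, errRateLimited)
	},
	// Called periodically with the result of pinging the Redis server.
	HealthCheckFunc: func(err error) {
		if err != nil {
			log.Printf("healthcheck failed: %v", err)
		}
	},
	HealthCheckInterval: 30 * time.Second,
}
```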
|
|
||||||
// An ErrorHandler handles errors returned by the task handler.
|
// An ErrorHandler handles an error that occurred during task processing.
|
||||||
type ErrorHandler interface {
|
type ErrorHandler interface {
|
||||||
HandleError(task *Task, err error, retried, maxRetry int)
|
HandleError(ctx context.Context, task *Task, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The ErrorHandlerFunc type is an adapter to allow the use of ordinary functions as an ErrorHandler.
|
// The ErrorHandlerFunc type is an adapter to allow the use of ordinary functions as an ErrorHandler.
|
||||||
// If f is a function with the appropriate signature, ErrorHandlerFunc(f) is an ErrorHandler that calls f.
|
// If f is a function with the appropriate signature, ErrorHandlerFunc(f) is an ErrorHandler that calls f.
|
||||||
type ErrorHandlerFunc func(task *Task, err error, retried, maxRetry int)
|
type ErrorHandlerFunc func(ctx context.Context, task *Task, err error)
|
||||||
|
|
||||||
// HandleError calls fn(task, err, retried, maxRetry)
|
// HandleError calls fn(ctx, task, err)
|
||||||
func (fn ErrorHandlerFunc) HandleError(task *Task, err error, retried, maxRetry int) {
|
func (fn ErrorHandlerFunc) HandleError(ctx context.Context, task *Task, err error) {
|
||||||
fn(task, err, retried, maxRetry)
|
fn(ctx, task, err)
|
||||||
}
|
}
|
||||||
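A sketch of plugging an ordinary function in through ErrorHandlerFunc, reusing the context helpers referenced in the HandleError example above; the logging call stands in for a real error reporter:

```go
package worker

import (
	"context"
	"fmt"
	"log"

	"github.com/hibiken/asynq"
)

func reportError(ctx context.Context, task *asynq.Task, err error) {
	retried, _ := asynq.GetRetryCount(ctx)
	maxRetry, _ := asynq.GetMaxRetry(ctx)
	if retried >= maxRetry {
		err = fmt.Errorf("retry exhausted for task %q: %w", task.Type(), err)
	}
	log.Printf("task error: %v", err) // placeholder for a real error reporter
}

// The adapter lets the plain function satisfy the ErrorHandler interface.
var _ asynq.ErrorHandler = asynq.ErrorHandlerFunc(reportError)
```

It would then be wired in through the server configuration, e.g. Config{ErrorHandler: asynq.ErrorHandlerFunc(reportError)}.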
|
|
||||||
|
// RetryDelayFunc calculates the retry delay duration for a failed task given
|
||||||
|
// the retry count, error, and the task.
|
||||||
|
//
|
||||||
|
// n is the number of times the task has been retried.
|
||||||
|
// e is the error returned by the task handler.
|
||||||
|
// t is the task in question.
|
||||||
|
type RetryDelayFunc func(n int, e error, t *Task) time.Duration
|
||||||
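Any function with this signature can be supplied as Config.RetryDelayFunc. A simple sketch with a flat delay and a hypothetical per-type override:

```go
package worker

import (
	"time"

	"github.com/hibiken/asynq"
)

// fixedDelay retries most tasks after a flat 30 seconds; the hypothetical
// "email:send" type instead backs off linearly with the retry count n.
func fixedDelay(n int, e error, t *asynq.Task) time.Duration {
	if t.Type() == "email:send" {
		return time.Duration(n+1) * time.Minute
	}
	return 30 * time.Second
}
```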
|
|
||||||
// Logger supports logging at various log levels.
|
// Logger supports logging at various log levels.
|
||||||
type Logger interface {
|
type Logger interface {
|
||||||
// Debug logs a message at Debug level.
|
// Debug logs a message at Debug level.
|
||||||
@@ -239,32 +267,51 @@ func toInternalLogLevel(l LogLevel) log.Level {
|
|||||||
panic(fmt.Sprintf("asynq: unexpected log level: %v", l))
|
panic(fmt.Sprintf("asynq: unexpected log level: %v", l))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Formula taken from https://github.com/mperham/sidekiq.
|
// DefaultRetryDelayFunc is the default RetryDelayFunc used if one is not specified in Config.
|
||||||
func defaultDelayFunc(n int, e error, t *Task) time.Duration {
|
// It uses exponential back-off strategy to calculate the retry delay.
|
||||||
|
func DefaultRetryDelayFunc(n int, e error, t *Task) time.Duration {
|
||||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||||
|
// Formula taken from https://github.com/mperham/sidekiq.
|
||||||
s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
|
s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
|
||||||
return time.Duration(s) * time.Second
|
return time.Duration(s) * time.Second
|
||||||
}
|
}
|
||||||
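To put rough numbers on the formula above: the n^4 term plus the constant 15 dominates, while r.Intn(30)*(n+1) adds jitter, so n=1 yields a delay between 16 and 74 seconds, n=5 roughly 640 to 814 seconds, and n=10 already over 2.7 hours.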
|
|
||||||
|
func defaultIsFailureFunc(err error) bool { return err != nil }
|
||||||
|
|
||||||
var defaultQueueConfig = map[string]int{
|
var defaultQueueConfig = map[string]int{
|
||||||
base.DefaultQueueName: 1,
|
base.DefaultQueueName: 1,
|
||||||
}
|
}
|
||||||
|
|
||||||
const defaultShutdownTimeout = 8 * time.Second
|
const (
|
||||||
|
defaultShutdownTimeout = 8 * time.Second
|
||||||
|
|
||||||
|
defaultHealthCheckInterval = 15 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
// NewServer returns a new Server given a redis connection option
|
// NewServer returns a new Server given a redis connection option
|
||||||
// and background processing configuration.
|
// and server configuration.
|
||||||
func NewServer(r RedisConnOpt, cfg Config) *Server {
|
func NewServer(r RedisConnOpt, cfg Config) *Server {
|
||||||
|
c, ok := r.MakeRedisClient().(redis.UniversalClient)
|
||||||
|
if !ok {
|
||||||
|
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
|
||||||
|
}
|
||||||
n := cfg.Concurrency
|
n := cfg.Concurrency
|
||||||
if n < 1 {
|
if n < 1 {
|
||||||
n = runtime.NumCPU()
|
n = runtime.NumCPU()
|
||||||
}
|
}
|
||||||
delayFunc := cfg.RetryDelayFunc
|
delayFunc := cfg.RetryDelayFunc
|
||||||
if delayFunc == nil {
|
if delayFunc == nil {
|
||||||
delayFunc = defaultDelayFunc
|
delayFunc = DefaultRetryDelayFunc
|
||||||
|
}
|
||||||
|
isFailureFunc := cfg.IsFailure
|
||||||
|
if isFailureFunc == nil {
|
||||||
|
isFailureFunc = defaultIsFailureFunc
|
||||||
}
|
}
|
||||||
queues := make(map[string]int)
|
queues := make(map[string]int)
|
||||||
for qname, p := range cfg.Queues {
|
for qname, p := range cfg.Queues {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
continue // ignore invalid queue names
|
||||||
|
}
|
||||||
if p > 0 {
|
if p > 0 {
|
||||||
queues[qname] = p
|
queues[qname] = p
|
||||||
}
|
}
|
||||||
@@ -272,10 +319,18 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
|
|||||||
if len(queues) == 0 {
|
if len(queues) == 0 {
|
||||||
queues = defaultQueueConfig
|
queues = defaultQueueConfig
|
||||||
}
|
}
|
||||||
|
var qnames []string
|
||||||
|
for q := range queues {
|
||||||
|
qnames = append(qnames, q)
|
||||||
|
}
|
||||||
shutdownTimeout := cfg.ShutdownTimeout
|
shutdownTimeout := cfg.ShutdownTimeout
|
||||||
if shutdownTimeout == 0 {
|
if shutdownTimeout == 0 {
|
||||||
shutdownTimeout = defaultShutdownTimeout
|
shutdownTimeout = defaultShutdownTimeout
|
||||||
}
|
}
|
||||||
|
healthcheckInterval := cfg.HealthCheckInterval
|
||||||
|
if healthcheckInterval == 0 {
|
||||||
|
healthcheckInterval = defaultHealthCheckInterval
|
||||||
|
}
|
||||||
logger := log.NewLogger(cfg.Logger)
|
logger := log.NewLogger(cfg.Logger)
|
||||||
loglevel := cfg.LogLevel
|
loglevel := cfg.LogLevel
|
||||||
if loglevel == level_unspecified {
|
if loglevel == level_unspecified {
|
||||||
@@ -283,15 +338,11 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
|
|||||||
}
|
}
|
||||||
logger.SetLevel(toInternalLogLevel(loglevel))
|
logger.SetLevel(toInternalLogLevel(loglevel))
|
||||||
|
|
||||||
host, err := os.Hostname()
|
rdb := rdb.NewRDB(c)
|
||||||
if err != nil {
|
starting := make(chan *workerInfo)
|
||||||
host = "unknown-host"
|
finished := make(chan *base.TaskMessage)
|
||||||
}
|
|
||||||
pid := os.Getpid()
|
|
||||||
|
|
||||||
rdb := rdb.NewRDB(createRedisClient(r))
|
|
||||||
ss := base.NewServerState(host, pid, n, queues, cfg.StrictPriority)
|
|
||||||
syncCh := make(chan *syncRequest)
|
syncCh := make(chan *syncRequest)
|
||||||
|
state := base.NewServerState()
|
||||||
cancels := base.NewCancelations()
|
cancels := base.NewCancelations()
|
||||||
|
|
||||||
syncer := newSyncer(syncerParams{
|
syncer := newSyncer(syncerParams{
|
||||||
@@ -302,14 +353,19 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
|
|||||||
heartbeater := newHeartbeater(heartbeaterParams{
|
heartbeater := newHeartbeater(heartbeaterParams{
|
||||||
logger: logger,
|
logger: logger,
|
||||||
broker: rdb,
|
broker: rdb,
|
||||||
serverState: ss,
|
|
||||||
interval: 5 * time.Second,
|
interval: 5 * time.Second,
|
||||||
|
concurrency: n,
|
||||||
|
queues: queues,
|
||||||
|
strictPriority: cfg.StrictPriority,
|
||||||
|
state: state,
|
||||||
|
starting: starting,
|
||||||
|
finished: finished,
|
||||||
})
|
})
|
||||||
scheduler := newScheduler(schedulerParams{
|
forwarder := newForwarder(forwarderParams{
|
||||||
logger: logger,
|
logger: logger,
|
||||||
broker: rdb,
|
broker: rdb,
|
||||||
|
queues: qnames,
|
||||||
interval: 5 * time.Second,
|
interval: 5 * time.Second,
|
||||||
queues: queues,
|
|
||||||
})
|
})
|
||||||
subscriber := newSubscriber(subscriberParams{
|
subscriber := newSubscriber(subscriberParams{
|
||||||
logger: logger,
|
logger: logger,
|
||||||
@@ -319,22 +375,43 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
|
|||||||
processor := newProcessor(processorParams{
|
processor := newProcessor(processorParams{
|
||||||
logger: logger,
|
logger: logger,
|
||||||
broker: rdb,
|
broker: rdb,
|
||||||
ss: ss,
|
|
||||||
retryDelayFunc: delayFunc,
|
retryDelayFunc: delayFunc,
|
||||||
|
isFailureFunc: isFailureFunc,
|
||||||
syncCh: syncCh,
|
syncCh: syncCh,
|
||||||
cancelations: cancels,
|
cancelations: cancels,
|
||||||
|
concurrency: n,
|
||||||
|
queues: queues,
|
||||||
|
strictPriority: cfg.StrictPriority,
|
||||||
errHandler: cfg.ErrorHandler,
|
errHandler: cfg.ErrorHandler,
|
||||||
shutdownTimeout: shutdownTimeout,
|
shutdownTimeout: shutdownTimeout,
|
||||||
|
starting: starting,
|
||||||
|
finished: finished,
|
||||||
})
|
})
|
||||||
return &Server{
|
recoverer := newRecoverer(recovererParams{
|
||||||
ss: ss,
|
|
||||||
logger: logger,
|
logger: logger,
|
||||||
broker: rdb,
|
broker: rdb,
|
||||||
scheduler: scheduler,
|
retryDelayFunc: delayFunc,
|
||||||
|
isFailureFunc: isFailureFunc,
|
||||||
|
queues: qnames,
|
||||||
|
interval: 1 * time.Minute,
|
||||||
|
})
|
||||||
|
healthchecker := newHealthChecker(healthcheckerParams{
|
||||||
|
logger: logger,
|
||||||
|
broker: rdb,
|
||||||
|
interval: healthcheckInterval,
|
||||||
|
healthcheckFunc: cfg.HealthCheckFunc,
|
||||||
|
})
|
||||||
|
return &Server{
|
||||||
|
logger: logger,
|
||||||
|
broker: rdb,
|
||||||
|
state: state,
|
||||||
|
forwarder: forwarder,
|
||||||
processor: processor,
|
processor: processor,
|
||||||
syncer: syncer,
|
syncer: syncer,
|
||||||
heartbeater: heartbeater,
|
heartbeater: heartbeater,
|
||||||
subscriber: subscriber,
|
subscriber: subscriber,
|
||||||
|
recoverer: recoverer,
|
||||||
|
healthchecker: healthchecker,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -343,8 +420,13 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
|
|||||||
// ProcessTask should return nil if the processing of a task
|
// ProcessTask should return nil if the processing of a task
|
||||||
// is successful.
|
// is successful.
|
||||||
//
|
//
|
||||||
// If ProcessTask return a non-nil error or panics, the task
|
// If ProcessTask returns a non-nil error or panics, the task
|
||||||
// will be retried after delay.
|
// will be retried after a delay if it has retries remaining,
|
||||||
|
// otherwise the task will be archived.
|
||||||
|
//
|
||||||
|
// One exception to this rule is when ProcessTask returns a SkipRetry error.
|
||||||
|
// If the returned error is SkipRetry or an error wraps SkipRetry, retry is
|
||||||
|
// skipped and the task will be immediately archived instead.
|
||||||
type Handler interface {
|
type Handler interface {
|
||||||
ProcessTask(context.Context, *Task) error
|
ProcessTask(context.Context, *Task) error
|
||||||
}
|
}
|
||||||
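A short handler sketch illustrating the SkipRetry escape hatch mentioned above; the payload shape and task type are illustrative, and SkipRetry is assumed to be the package-level sentinel error this comment refers to:

```go
package worker

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/hibiken/asynq"
)

func handlePayment(ctx context.Context, t *asynq.Task) error {
	var p struct{ OrderID string }
	if err := json.Unmarshal(t.Payload(), &p); err != nil {
		// A malformed payload will never succeed, so archive it immediately
		// instead of burning through the retry budget.
		return fmt.Errorf("malformed payload: %v: %w", err, asynq.SkipRetry)
	}
	// ... charge the order; returning a plain error schedules a retry.
	return nil
}
```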
@@ -360,89 +442,100 @@ func (fn HandlerFunc) ProcessTask(ctx context.Context, task *Task) error {
|
|||||||
return fn(ctx, task)
|
return fn(ctx, task)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrServerStopped indicates that the operation is now illegal because of the server being stopped.
|
// ErrServerClosed indicates that the operation is now illegal because the server has been shut down.
|
||||||
var ErrServerStopped = errors.New("asynq: the server has been stopped")
|
var ErrServerClosed = errors.New("asynq: Server closed")
|
||||||
|
|
||||||
// Run starts the background-task processing and blocks until
|
// Run starts the task processing and blocks until
|
||||||
// an os signal to exit the program is received. Once it receives
|
// an os signal to exit the program is received. Once it receives
|
||||||
// a signal, it gracefully shuts down all active workers and other
|
// a signal, it gracefully shuts down all active workers and other
|
||||||
// goroutines to process the tasks.
|
// goroutines to process the tasks.
|
||||||
//
|
//
|
||||||
// Run returns any error encountered during server startup time.
|
// Run returns any error encountered at server startup time.
|
||||||
// If the server has already been stopped, ErrServerStopped is returned.
|
// If the server has already been shut down, ErrServerClosed is returned.
|
||||||
func (srv *Server) Run(handler Handler) error {
|
func (srv *Server) Run(handler Handler) error {
|
||||||
if err := srv.Start(handler); err != nil {
|
if err := srv.Start(handler); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
srv.waitForSignals()
|
srv.waitForSignals()
|
||||||
srv.Stop()
|
srv.Shutdown()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start starts the worker server. Once the server has started,
|
// Start starts the worker server. Once the server has started,
|
||||||
// it pulls tasks off queues and starts a worker goroutine for each task.
|
// it pulls tasks off queues and starts a worker goroutine for each task
|
||||||
|
// and then calls the Handler to process it.
|
||||||
// Tasks are processed concurrently by the workers up to the number of
|
// Tasks are processed concurrently by the workers up to the number of
|
||||||
// concurrency specified at the initialization time.
|
// concurrency specified in Config.Concurrency.
|
||||||
//
|
//
|
||||||
// Start returns any error encountered during server startup time.
|
// Start returns any error encountered at server startup time.
|
||||||
// If the server has already been stopped, ErrServerStopped is returned.
|
// If the server has already been shut down, ErrServerClosed is returned.
|
||||||
func (srv *Server) Start(handler Handler) error {
|
func (srv *Server) Start(handler Handler) error {
|
||||||
if handler == nil {
|
if handler == nil {
|
||||||
return fmt.Errorf("asynq: server cannot run with nil handler")
|
return fmt.Errorf("asynq: server cannot run with nil handler")
|
||||||
}
|
}
|
||||||
switch srv.ss.Status() {
|
switch srv.state.Get() {
|
||||||
case base.StatusRunning:
|
case base.StateActive:
|
||||||
return fmt.Errorf("asynq: the server is already running")
|
return fmt.Errorf("asynq: the server is already running")
|
||||||
case base.StatusStopped:
|
case base.StateStopped:
|
||||||
return ErrServerStopped
|
return fmt.Errorf("asynq: the server is in the stopped state. Waiting for shutdown.")
|
||||||
|
case base.StateClosed:
|
||||||
|
return ErrServerClosed
|
||||||
}
|
}
|
||||||
srv.ss.SetStatus(base.StatusRunning)
|
srv.state.Set(base.StateActive)
|
||||||
srv.processor.handler = handler
|
srv.processor.handler = handler
|
||||||
|
|
||||||
srv.logger.Info("Starting processing")
|
srv.logger.Info("Starting processing")
|
||||||
|
|
||||||
srv.heartbeater.start(&srv.wg)
|
srv.heartbeater.start(&srv.wg)
|
||||||
|
srv.healthchecker.start(&srv.wg)
|
||||||
srv.subscriber.start(&srv.wg)
|
srv.subscriber.start(&srv.wg)
|
||||||
srv.syncer.start(&srv.wg)
|
srv.syncer.start(&srv.wg)
|
||||||
srv.scheduler.start(&srv.wg)
|
srv.recoverer.start(&srv.wg)
|
||||||
|
srv.forwarder.start(&srv.wg)
|
||||||
srv.processor.start(&srv.wg)
|
srv.processor.start(&srv.wg)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop stops the worker server.
|
// Shutdown gracefully shuts down the server.
|
||||||
// It gracefully closes all active workers. The server will wait for
|
// It gracefully closes all active workers. The server will wait for
|
||||||
// active workers to finish processing tasks for duration specified in Config.ShutdownTimeout.
|
// active workers to finish processing tasks for duration specified in Config.ShutdownTimeout.
|
||||||
// If worker didn't finish processing a task during the timeout, the task will be pushed back to Redis.
|
// If worker didn't finish processing a task during the timeout, the task will be pushed back to Redis.
|
||||||
func (srv *Server) Stop() {
|
func (srv *Server) Shutdown() {
|
||||||
switch srv.ss.Status() {
|
switch srv.state.Get() {
|
||||||
case base.StatusIdle, base.StatusStopped:
|
case base.StateNew, base.StateClosed:
|
||||||
// server is not running, do nothing and return.
|
// server is not running, do nothing and return.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
srv.logger.Info("Starting graceful shutdown")
|
srv.logger.Info("Starting graceful shutdown")
|
||||||
// Note: The order of termination is important.
|
// Note: The order of shutdown is important.
|
||||||
// Sender goroutines should be terminated before the receiver goroutines.
|
// Sender goroutines should be terminated before the receiver goroutines.
|
||||||
// processor -> syncer (via syncCh)
|
// processor -> syncer (via syncCh)
|
||||||
srv.scheduler.terminate()
|
// processor -> heartbeater (via starting, finished channels)
|
||||||
srv.processor.terminate()
|
srv.forwarder.shutdown()
|
||||||
srv.syncer.terminate()
|
srv.processor.shutdown()
|
||||||
srv.subscriber.terminate()
|
srv.recoverer.shutdown()
|
||||||
srv.heartbeater.terminate()
|
srv.syncer.shutdown()
|
||||||
|
srv.subscriber.shutdown()
|
||||||
|
srv.healthchecker.shutdown()
|
||||||
|
srv.heartbeater.shutdown()
|
||||||
|
|
||||||
srv.wg.Wait()
|
srv.wg.Wait()
|
||||||
|
|
||||||
srv.broker.Close()
|
srv.broker.Close()
|
||||||
srv.ss.SetStatus(base.StatusStopped)
|
srv.state.Set(base.StateClosed)
|
||||||
|
|
||||||
srv.logger.Info("Exiting")
|
srv.logger.Info("Exiting")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Quiet signals the server to stop pulling new tasks off queues.
|
// Stop signals the server to stop pulling new tasks off queues.
|
||||||
// Quiet should be used before stopping the server.
|
// Stop can be used before shutting down the server to ensure that all
|
||||||
func (srv *Server) Quiet() {
|
// currently active tasks are processed before server shutdown.
|
||||||
|
//
|
||||||
|
// Stop does not shut down the server; make sure to call Shutdown before exiting.
|
||||||
|
func (srv *Server) Stop() {
|
||||||
srv.logger.Info("Stopping processor")
|
srv.logger.Info("Stopping processor")
|
||||||
srv.processor.stop()
|
srv.processor.stop()
|
||||||
srv.ss.SetStatus(base.StatusQuiet)
|
srv.state.Set(base.StateStopped)
|
||||||
srv.logger.Info("Processor stopped")
|
srv.logger.Info("Processor stopped")
|
||||||
}
|
}
|
||||||
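Putting Stop and Shutdown together, a custom signal loop (for programs that call Start directly instead of Run) might look like this sketch; the signal wiring assumes a Unix-like platform:

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "localhost:6379"}, asynq.Config{Concurrency: 10})
	mux := asynq.NewServeMux() // handlers omitted for brevity

	if err := srv.Start(mux); err != nil {
		log.Fatal(err)
	}

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT, syscall.SIGTSTP)
	for sig := range sigs {
		if sig == syscall.SIGTSTP {
			srv.Stop() // stop pulling new tasks; active workers keep running
			continue
		}
		break
	}
	srv.Shutdown() // wait for in-flight tasks up to ShutdownTimeout, then exit
}
```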
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/hibiken/asynq/internal/asynqtest"
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
"github.com/hibiken/asynq/internal/testbroker"
|
"github.com/hibiken/asynq/internal/testbroker"
|
||||||
"go.uber.org/goleak"
|
"go.uber.org/goleak"
|
||||||
@@ -21,12 +22,10 @@ func TestServer(t *testing.T) {
|
|||||||
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
|
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
|
||||||
defer goleak.VerifyNoLeaks(t, ignoreOpt)
|
defer goleak.VerifyNoLeaks(t, ignoreOpt)
|
||||||
|
|
||||||
r := &RedisClientOpt{
|
redisConnOpt := getRedisConnOpt(t)
|
||||||
Addr: "localhost:6379",
|
c := NewClient(redisConnOpt)
|
||||||
DB: 15,
|
defer c.Close()
|
||||||
}
|
srv := NewServer(redisConnOpt, Config{
|
||||||
c := NewClient(r)
|
|
||||||
srv := NewServer(r, Config{
|
|
||||||
Concurrency: 10,
|
Concurrency: 10,
|
||||||
LogLevel: testLogLevel,
|
LogLevel: testLogLevel,
|
||||||
})
|
})
|
||||||
@@ -41,17 +40,17 @@ func TestServer(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.Enqueue(NewTask("send_email", map[string]interface{}{"recipient_id": 123}))
|
_, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 123})))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("could not enqueue a task: %v", err)
|
t.Errorf("could not enqueue a task: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.EnqueueAt(time.Now().Add(time.Hour), NewTask("send_email", map[string]interface{}{"recipient_id": 456}))
|
_, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 456})), ProcessIn(1*time.Hour))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("could not enqueue a task: %v", err)
|
t.Errorf("could not enqueue a task: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
srv.Stop()
|
srv.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServerRun(t *testing.T) {
|
func TestServerRun(t *testing.T) {
|
||||||
@@ -72,7 +71,7 @@ func TestServerRun(t *testing.T) {
|
|||||||
go func() {
|
go func() {
|
||||||
select {
|
select {
|
||||||
case <-time.After(10 * time.Second):
|
case <-time.After(10 * time.Second):
|
||||||
t.Fatal("server did not stop after receiving TERM signal")
|
panic("server did not stop after receiving TERM signal")
|
||||||
case <-done:
|
case <-done:
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@@ -83,16 +82,16 @@ func TestServerRun(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServerErrServerStopped(t *testing.T) {
|
func TestServerErrServerClosed(t *testing.T) {
|
||||||
srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
|
srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
|
||||||
handler := NewServeMux()
|
handler := NewServeMux()
|
||||||
if err := srv.Start(handler); err != nil {
|
if err := srv.Start(handler); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
srv.Stop()
|
srv.Shutdown()
|
||||||
err := srv.Start(handler)
|
err := srv.Start(handler)
|
||||||
if err != ErrServerStopped {
|
if err != ErrServerClosed {
|
||||||
t.Errorf("Restarting server: (*Server).Start(handler) = %v, want ErrServerStopped error", err)
|
t.Errorf("Restarting server: (*Server).Start(handler) = %v, want ErrServerClosed error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -101,7 +100,7 @@ func TestServerErrNilHandler(t *testing.T) {
|
|||||||
err := srv.Start(nil)
|
err := srv.Start(nil)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Error("Starting server with nil handler: (*Server).Start(nil) did not return error")
|
t.Error("Starting server with nil handler: (*Server).Start(nil) did not return error")
|
||||||
srv.Stop()
|
srv.Shutdown()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -115,7 +114,7 @@ func TestServerErrServerRunning(t *testing.T) {
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
t.Error("Calling (*Server).Start(handler) on already running server did not return error")
|
t.Error("Calling (*Server).Start(handler) on already running server did not return error")
|
||||||
}
|
}
|
||||||
srv.Stop()
|
srv.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServerWithRedisDown(t *testing.T) {
|
func TestServerWithRedisDown(t *testing.T) {
|
||||||
@@ -129,7 +128,7 @@ func TestServerWithRedisDown(t *testing.T) {
|
|||||||
testBroker := testbroker.NewTestBroker(r)
|
testBroker := testbroker.NewTestBroker(r)
|
||||||
srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
|
srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
|
||||||
srv.broker = testBroker
|
srv.broker = testBroker
|
||||||
srv.scheduler.broker = testBroker
|
srv.forwarder.broker = testBroker
|
||||||
srv.heartbeater.broker = testBroker
|
srv.heartbeater.broker = testBroker
|
||||||
srv.processor.broker = testBroker
|
srv.processor.broker = testBroker
|
||||||
srv.subscriber.broker = testBroker
|
srv.subscriber.broker = testBroker
|
||||||
@@ -147,7 +146,7 @@ func TestServerWithRedisDown(t *testing.T) {
|
|||||||
|
|
||||||
time.Sleep(3 * time.Second)
|
time.Sleep(3 * time.Second)
|
||||||
|
|
||||||
srv.Stop()
|
srv.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServerWithFlakyBroker(t *testing.T) {
|
func TestServerWithFlakyBroker(t *testing.T) {
|
||||||
@@ -159,19 +158,20 @@ func TestServerWithFlakyBroker(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
r := rdb.NewRDB(setup(t))
|
r := rdb.NewRDB(setup(t))
|
||||||
testBroker := testbroker.NewTestBroker(r)
|
testBroker := testbroker.NewTestBroker(r)
|
||||||
srv := NewServer(RedisClientOpt{Addr: redisAddr, DB: redisDB}, Config{LogLevel: testLogLevel})
|
redisConnOpt := getRedisConnOpt(t)
|
||||||
|
srv := NewServer(redisConnOpt, Config{LogLevel: testLogLevel})
|
||||||
srv.broker = testBroker
|
srv.broker = testBroker
|
||||||
srv.scheduler.broker = testBroker
|
srv.forwarder.broker = testBroker
|
||||||
srv.heartbeater.broker = testBroker
|
srv.heartbeater.broker = testBroker
|
||||||
srv.processor.broker = testBroker
|
srv.processor.broker = testBroker
|
||||||
srv.subscriber.broker = testBroker
|
srv.subscriber.broker = testBroker
|
||||||
|
|
||||||
c := NewClient(RedisClientOpt{Addr: redisAddr, DB: redisDB})
|
c := NewClient(redisConnOpt)
|
||||||
|
|
||||||
h := func(ctx context.Context, task *Task) error {
|
h := func(ctx context.Context, task *Task) error {
|
||||||
// force task retry.
|
// force task retry.
|
||||||
if task.Type == "bad_task" {
|
if task.Type() == "bad_task" {
|
||||||
return fmt.Errorf("could not process %q", task.Type)
|
return fmt.Errorf("could not process %q", task.Type())
|
||||||
}
|
}
|
||||||
time.Sleep(2 * time.Second)
|
time.Sleep(2 * time.Second)
|
||||||
return nil
|
return nil
|
||||||
@@ -183,15 +183,15 @@ func TestServerWithFlakyBroker(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < 10; i++ {
|
for i := 0; i < 10; i++ {
|
||||||
err := c.Enqueue(NewTask("enqueued", nil), MaxRetry(i))
|
_, err := c.Enqueue(NewTask("enqueued", nil), MaxRetry(i))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
err = c.Enqueue(NewTask("bad_task", nil))
|
_, err = c.Enqueue(NewTask("bad_task", nil))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
err = c.EnqueueIn(time.Duration(i)*time.Second, NewTask("scheduled", nil))
|
_, err = c.Enqueue(NewTask("scheduled", nil), ProcessIn(time.Duration(i)*time.Second))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -207,7 +207,7 @@ func TestServerWithFlakyBroker(t *testing.T) {
|
|||||||
|
|
||||||
time.Sleep(3 * time.Second)
|
time.Sleep(3 * time.Second)
|
||||||
|
|
||||||
srv.Stop()
|
srv.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLogLevel(t *testing.T) {
|
func TestLogLevel(t *testing.T) {
|
||||||
|
|||||||
@@ -22,9 +22,16 @@ func (srv *Server) waitForSignals() {
|
|||||||
for {
|
for {
|
||||||
sig := <-sigs
|
sig := <-sigs
|
||||||
if sig == unix.SIGTSTP {
|
if sig == unix.SIGTSTP {
|
||||||
srv.Quiet()
|
srv.Stop()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Scheduler) waitForSignals() {
|
||||||
|
s.logger.Info("Send signal TERM or INT to stop the scheduler")
|
||||||
|
sigs := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(sigs, unix.SIGTERM, unix.SIGINT)
|
||||||
|
<-sigs
|
||||||
|
}
|
||||||
|
|||||||
@@ -20,3 +20,10 @@ func (srv *Server) waitForSignals() {
|
|||||||
signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
|
signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
|
||||||
<-sigs
|
<-sigs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Scheduler) waitForSignals() {
|
||||||
|
s.logger.Info("Send signal TERM or INT to stop the scheduler")
|
||||||
|
sigs := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
|
||||||
|
<-sigs
|
||||||
|
}
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ type subscriber struct {
|
|||||||
// channel to communicate back to the long running "subscriber" goroutine.
|
// channel to communicate back to the long running "subscriber" goroutine.
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
|
|
||||||
// cancelations hold cancel functions for all in-progress tasks.
|
// cancelations hold cancel functions for all active tasks.
|
||||||
cancelations *base.Cancelations
|
cancelations *base.Cancelations
|
||||||
|
|
||||||
// time to wait before retrying to connect to redis.
|
// time to wait before retrying to connect to redis.
|
||||||
@@ -43,7 +43,7 @@ func newSubscriber(params subscriberParams) *subscriber {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *subscriber) terminate() {
|
func (s *subscriber) shutdown() {
|
||||||
s.logger.Debug("Subscriber shutting down...")
|
s.logger.Debug("Subscriber shutting down...")
|
||||||
// Signal the subscriber goroutine to stop.
|
// Signal the subscriber goroutine to stop.
|
||||||
s.done <- struct{}{}
|
s.done <- struct{}{}
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ import (
|
|||||||
|
|
||||||
func TestSubscriber(t *testing.T) {
|
func TestSubscriber(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
rdbClient := rdb.NewRDB(r)
|
rdbClient := rdb.NewRDB(r)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
@@ -45,7 +46,7 @@ func TestSubscriber(t *testing.T) {
|
|||||||
})
|
})
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
subscriber.start(&wg)
|
subscriber.start(&wg)
|
||||||
defer subscriber.terminate()
|
defer subscriber.shutdown()
|
||||||
|
|
||||||
// wait for subscriber to establish connection to pubsub channel
|
// wait for subscriber to establish connection to pubsub channel
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
@@ -76,6 +77,7 @@ func TestSubscriberWithRedisDown(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
r := rdb.NewRDB(setup(t))
|
r := rdb.NewRDB(setup(t))
|
||||||
|
defer r.Close()
|
||||||
testBroker := testbroker.NewTestBroker(r)
|
testBroker := testbroker.NewTestBroker(r)
|
||||||
|
|
||||||
cancelations := base.NewCancelations()
|
cancelations := base.NewCancelations()
|
||||||
@@ -89,7 +91,7 @@ func TestSubscriberWithRedisDown(t *testing.T) {
|
|||||||
testBroker.Sleep() // simulate a situation where subscriber cannot connect to redis.
|
testBroker.Sleep() // simulate a situation where subscriber cannot connect to redis.
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
subscriber.start(&wg)
|
subscriber.start(&wg)
|
||||||
defer subscriber.terminate()
|
defer subscriber.shutdown()
|
||||||
|
|
||||||
time.Sleep(2 * time.Second) // subscriber should wait and retry connecting to redis.
|
time.Sleep(2 * time.Second) // subscriber should wait and retry connecting to redis.
|
||||||
|
|
||||||
|
|||||||
@@ -28,6 +28,7 @@ type syncer struct {
|
|||||||
type syncRequest struct {
|
type syncRequest struct {
|
||||||
fn func() error // sync operation
|
fn func() error // sync operation
|
||||||
errMsg string // error message
|
errMsg string // error message
|
||||||
|
deadline time.Time // request should be dropped if deadline has been exceeded
|
||||||
}
|
}
|
||||||
|
|
||||||
type syncerParams struct {
|
type syncerParams struct {
|
||||||
@@ -45,7 +46,7 @@ func newSyncer(params syncerParams) *syncer {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *syncer) terminate() {
|
func (s *syncer) shutdown() {
|
||||||
s.logger.Debug("Syncer shutting down...")
|
s.logger.Debug("Syncer shutting down...")
|
||||||
// Signal the syncer goroutine to stop.
|
// Signal the syncer goroutine to stop.
|
||||||
s.done <- struct{}{}
|
s.done <- struct{}{}
|
||||||
@@ -72,6 +73,9 @@ func (s *syncer) start(wg *sync.WaitGroup) {
|
|||||||
case <-time.After(s.interval):
|
case <-time.After(s.interval):
|
||||||
var temp []*syncRequest
|
var temp []*syncRequest
|
||||||
for _, req := range requests {
|
for _, req := range requests {
|
||||||
|
if req.deadline.Before(time.Now()) {
|
||||||
|
continue // drop stale request
|
||||||
|
}
|
||||||
if err := req.fn(); err != nil {
|
if err := req.fn(); err != nil {
|
||||||
temp = append(temp, req)
|
temp = append(temp, req)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -22,8 +22,9 @@ func TestSyncer(t *testing.T) {
|
|||||||
h.NewTaskMessage("gen_thumbnail", nil),
|
h.NewTaskMessage("gen_thumbnail", nil),
|
||||||
}
|
}
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
rdbClient := rdb.NewRDB(r)
|
rdbClient := rdb.NewRDB(r)
|
||||||
h.SeedInProgressQueue(t, r, inProgress)
|
h.SeedActiveQueue(t, r, inProgress, base.DefaultQueueName)
|
||||||
|
|
||||||
const interval = time.Second
|
const interval = time.Second
|
||||||
syncRequestCh := make(chan *syncRequest)
|
syncRequestCh := make(chan *syncRequest)
|
||||||
@@ -34,7 +35,7 @@ func TestSyncer(t *testing.T) {
|
|||||||
})
|
})
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
syncer.start(&wg)
|
syncer.start(&wg)
|
||||||
defer syncer.terminate()
|
defer syncer.shutdown()
|
||||||
|
|
||||||
for _, msg := range inProgress {
|
for _, msg := range inProgress {
|
||||||
m := msg
|
m := msg
|
||||||
@@ -42,14 +43,15 @@ func TestSyncer(t *testing.T) {
|
|||||||
fn: func() error {
|
fn: func() error {
|
||||||
return rdbClient.Done(m)
|
return rdbClient.Done(m)
|
||||||
},
|
},
|
||||||
|
deadline: time.Now().Add(5 * time.Minute),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
time.Sleep(2 * interval) // ensure that syncer runs at least once
|
time.Sleep(2 * interval) // ensure that syncer runs at least once
|
||||||
|
|
||||||
gotInProgress := h.GetInProgressMessages(t, r)
|
gotActive := h.GetActiveMessages(t, r, base.DefaultQueueName)
|
||||||
if l := len(gotInProgress); l != 0 {
|
if l := len(gotActive); l != 0 {
|
||||||
t.Errorf("%q has length %d; want 0", base.InProgressQueue, l)
|
t.Errorf("%q has length %d; want 0", base.ActiveKey(base.DefaultQueueName), l)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -64,7 +66,7 @@ func TestSyncerRetry(t *testing.T) {
|
|||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
syncer.start(&wg)
|
syncer.start(&wg)
|
||||||
defer syncer.terminate()
|
defer syncer.shutdown()
|
||||||
|
|
||||||
var (
|
var (
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
@@ -87,6 +89,7 @@ func TestSyncerRetry(t *testing.T) {
|
|||||||
syncRequestCh <- &syncRequest{
|
syncRequestCh <- &syncRequest{
|
||||||
fn: requestFunc,
|
fn: requestFunc,
|
||||||
errMsg: "error",
|
errMsg: "error",
|
||||||
|
deadline: time.Now().Add(5 * time.Minute),
|
||||||
}
|
}
|
||||||
|
|
||||||
// allow syncer to retry
|
// allow syncer to retry
|
||||||
@@ -98,3 +101,41 @@ func TestSyncerRetry(t *testing.T) {
 	}
 	mu.Unlock()
 }
+
+func TestSyncerDropsStaleRequests(t *testing.T) {
+	const interval = time.Second
+	syncRequestCh := make(chan *syncRequest)
+	syncer := newSyncer(syncerParams{
+		logger:     testLogger,
+		requestsCh: syncRequestCh,
+		interval:   interval,
+	})
+	var wg sync.WaitGroup
+	syncer.start(&wg)
+
+	var (
+		mu sync.Mutex
+		n  int // number of times request has been processed
+	)
+
+	for i := 0; i < 10; i++ {
+		syncRequestCh <- &syncRequest{
+			fn: func() error {
+				mu.Lock()
+				n++
+				mu.Unlock()
+				return nil
+			},
+			deadline: time.Now().Add(time.Duration(-i) * time.Second), // already exceeded deadline
+		}
+	}
+
+	time.Sleep(2 * interval) // ensure that syncer runs at least once
+	syncer.shutdown()
+
+	mu.Lock()
+	if n != 0 {
+		t.Errorf("requests has been processed %d times, want 0", n)
+	}
+	mu.Unlock()
+}
@@ -1,19 +1,11 @@
 # Asynq CLI

-Asynq CLI is a command line tool to monitor the tasks managed by `asynq` package.
+Asynq CLI is a command line tool to monitor the queues and tasks managed by `asynq` package.

 ## Table of Contents

 - [Installation](#installation)
-- [Quick Start](#quick-start)
-- [Stats](#stats)
-- [History](#history)
-- [Servers](#servers)
-- [List](#list)
-- [Enqueue](#enqueue)
-- [Delete](#delete)
-- [Kill](#kill)
-- [Cancel](#cancel)
+- [Usage](#usage)
 - [Config File](#config-file)

 ## Installation
@@ -24,133 +16,41 @@ In order to use the tool, compile it using the following command:

 This will create the asynq executable under your `$GOPATH/bin` directory.

-## Quickstart
+## Usage

-The tool has a few commands to inspect the state of tasks and queues.
+### Commands

-Run `asynq help` to see all the available commands.
+To view details on any command, use `asynq help <command> <subcommand>`.
+
+- `asynq stats`
+- `asynq queue [ls inspect history rm pause unpause]`
+- `asynq task [ls cancel delete archive run delete-all archive-all run-all]`
+- `asynq server [ls]`
+
+### Global flags

 Asynq CLI needs to connect to a redis-server to inspect the state of queues and tasks. Use flags to specify the options to connect to the redis-server used by your application.
+To connect to a redis cluster, pass `--cluster` and `--cluster_addrs` flags.

 By default, CLI will try to connect to a redis server running at `localhost:6379`.

-### Stats
-
-Stats command gives the overview of the current state of tasks and queues. You can run it in conjunction with `watch` command to repeatedly run `stats`.
-
-Example:
-
-    watch -n 3 asynq stats
-
-This will run `asynq stats` command every 3 seconds.
-
-
-
-### History
-
-History command shows the number of processed and failed tasks from the last x days.
-
-By default, it shows the stats from the last 10 days. Use `--days` to specify the number of days.
-
-Example:
-
-    asynq history --days=30
-
-
-
-### Servers
-
-Servers command shows the list of running worker servers pulling tasks from the given redis instance.
-
-Example:
-
-    asynq servers
-
-### List
-
-List command shows all tasks in the specified state in a table format
-
-Example:
-
-    asynq ls retry
-    asynq ls scheduled
-    asynq ls dead
-    asynq ls enqueued:default
-    asynq ls inprogress
-
-### Enqueue
-
-There are two commands to enqueue tasks.
-
-Command `enq` takes a task ID and moves the task to **Enqueued** state. You can obtain the task ID by running `ls` command.
-
-Example:
-
-    asynq enq d:1575732274:bnogo8gt6toe23vhef0g
-
-Command `enqall` moves all tasks to **Enqueued** state from the specified state.
-
-Example:
-
-    asynq enqall retry
-
-Running the above command will move all **Retry** tasks to **Enqueued** state.
-
-### Delete
-
-There are two commands for task deletion.
-
-Command `del` takes a task ID and deletes the task. You can obtain the task ID by running `ls` command.
-
-Example:
-
-    asynq del r:1575732274:bnogo8gt6toe23vhef0g
-
-Command `delall` deletes all tasks which are in the specified state.
-
-Example:
-
-    asynq delall retry
-
-Running the above command will delete all **Retry** tasks.
-
-### Kill
-
-There are two commands to kill (i.e. move to dead state) tasks.
-
-Command `kill` takes a task ID and kills the task. You can obtain the task ID by running `ls` command.
-
-Example:
-
-    asynq kill r:1575732274:bnogo8gt6toe23vhef0g
-
-Command `killall` kills all tasks which are in the specified state.
-
-Example:
-
-    asynq killall retry
-
-Running the above command will move all **Retry** tasks to **Dead** state.
-
-### Cancel
-
-Command `cancel` takes a task ID and sends a cancelation signal to the goroutine processing the specified task.
-You can obtain the task ID by running `ls` command.
-
-The task should be in "in-progress" state.
-Handler implementation needs to be context aware in order to actually stop processing.
-
-Example:
-
-    asynq cancel bnogo8gt6toe23vhef0g
+```
+      --config string          config file to set flag default values (default is $HOME/.asynq.yaml)
+  -n, --db int                 redis database number (default is 0)
+  -h, --help                   help for asynq
+  -p, --password string        password to use when connecting to redis server
+  -u, --uri string             redis server URI (default "127.0.0.1:6379")
+      --cluster                connect to redis cluster
+      --cluster_addrs string   list of comma-separated redis server addresses
+```

 ## Config File

 You can use a config file to set default values for the flags.
-This is useful, for example when you have to connect to a remote redis server.

 By default, `asynq` will try to read config file located in
-`$HOME/.asynq.(yaml|json)`. You can specify the file location via `--config` flag.
+`$HOME/.asynq.(yml|json)`. You can specify the file location via `--config` flag.

 Config file example:
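The example file itself is outside this hunk. A minimal sketch of such a config, assuming the `uri`, `db`, and `password` keys that the CLI reads via viper elsewhere in this changeset (values are illustrative):

```
uri: 127.0.0.1:6379
db: 2
password: mypassword
```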
@@ -1,53 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
// cancelCmd represents the cancel command
|
|
||||||
var cancelCmd = &cobra.Command{
|
|
||||||
Use: "cancel [task id]",
|
|
||||||
Short: "Sends a cancelation signal to the goroutine processing the specified task",
|
|
||||||
Long: `Cancel (asynq cancel) will send a cancelation signal to the goroutine processing
|
|
||||||
the specified task.
|
|
||||||
|
|
||||||
The command takes one argument which specifies the task to cancel.
|
|
||||||
The task should be in in-progress state.
|
|
||||||
Identifier for a task should be obtained by running "asynq ls" command.
|
|
||||||
|
|
||||||
Handler implementation needs to be context aware for cancelation signal to
|
|
||||||
actually cancel the processing.
|
|
||||||
|
|
||||||
Example: asynq cancel bnogo8gt6toe23vhef0g`,
|
|
||||||
Args: cobra.ExactArgs(1),
|
|
||||||
Run: cancel,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(cancelCmd)
|
|
||||||
}
|
|
||||||
|
|
||||||
func cancel(cmd *cobra.Command, args []string) {
|
|
||||||
r := rdb.NewRDB(redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
}))
|
|
||||||
|
|
||||||
err := r.PublishCancelation(args[0])
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("could not send cancelation signal: %v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fmt.Printf("Successfully sent cancelation siganl for task %s\n", args[0])
|
|
||||||
}
|
|
||||||
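The command above only publishes a cancelation signal; the handler must watch its context for processing to actually stop. A minimal sketch of a context-aware handler (task type and loop body are illustrative):

```go
package tasks

import (
	"context"
	"time"

	"github.com/hibiken/asynq"
)

// HandleLongTask gives up promptly when the server cancels the task's
// context, which is what the CLI's cancel command triggers.
func HandleLongTask(ctx context.Context, t *asynq.Task) error {
	for i := 0; i < 100; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err() // stop processing on cancelation
		case <-time.After(100 * time.Millisecond):
			// one unit of work
		}
	}
	return nil
}

// HandleLongTask satisfies asynq.HandlerFunc.
var _ asynq.HandlerFunc = HandleLongTask
```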
tools/asynq/cmd/cron.go (new file, 129 lines)
@@ -0,0 +1,129 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hibiken/asynq"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(cronCmd)
|
||||||
|
cronCmd.AddCommand(cronListCmd)
|
||||||
|
cronCmd.AddCommand(cronHistoryCmd)
|
||||||
|
cronHistoryCmd.Flags().Int("page", 1, "page number")
|
||||||
|
cronHistoryCmd.Flags().Int("size", 30, "page size")
|
||||||
|
}
|
||||||
|
|
||||||
|
var cronCmd = &cobra.Command{
|
||||||
|
Use: "cron",
|
||||||
|
Short: "Manage cron",
|
||||||
|
}
|
||||||
|
|
||||||
|
var cronListCmd = &cobra.Command{
|
||||||
|
Use: "ls",
|
||||||
|
Short: "List cron entries",
|
||||||
|
Run: cronList,
|
||||||
|
}
|
||||||
|
|
||||||
|
var cronHistoryCmd = &cobra.Command{
|
||||||
|
Use: "history [ENTRY_ID...]",
|
||||||
|
Short: "Show history of each cron tasks",
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
|
Run: cronHistory,
|
||||||
|
}
|
||||||
|
|
||||||
|
func cronList(cmd *cobra.Command, args []string) {
|
||||||
|
inspector := createInspector()
|
||||||
|
|
||||||
|
entries, err := inspector.SchedulerEntries()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if len(entries) == 0 {
|
||||||
|
fmt.Println("No scheduler entries")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort entries by spec.
|
||||||
|
sort.Slice(entries, func(i, j int) bool {
|
||||||
|
x, y := entries[i], entries[j]
|
||||||
|
return x.Spec < y.Spec
|
||||||
|
})
|
||||||
|
|
||||||
|
cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
|
||||||
|
printRows := func(w io.Writer, tmpl string) {
|
||||||
|
for _, e := range entries {
|
||||||
|
fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), formatPayload(e.Task.Payload()), e.Opts,
|
||||||
|
nextEnqueue(e.Next), prevEnqueue(e.Prev))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
printTable(cols, printRows)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns a string describing when the next enqueue will happen.
|
||||||
|
func nextEnqueue(nextEnqueueAt time.Time) string {
|
||||||
|
d := nextEnqueueAt.Sub(time.Now()).Round(time.Second)
|
||||||
|
if d < 0 {
|
||||||
|
return "Now"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("In %v", d)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns a string describing when the previous enqueue was.
|
||||||
|
func prevEnqueue(prevEnqueuedAt time.Time) string {
|
||||||
|
if prevEnqueuedAt.IsZero() {
|
||||||
|
return "N/A"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%v ago", time.Since(prevEnqueuedAt).Round(time.Second))
|
||||||
|
}
|
||||||
|
|
||||||
|
func cronHistory(cmd *cobra.Command, args []string) {
|
||||||
|
pageNum, err := cmd.Flags().GetInt("page")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
pageSize, err := cmd.Flags().GetInt("size")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
inspector := createInspector()
|
||||||
|
for i, entryID := range args {
|
||||||
|
if i > 0 {
|
||||||
|
fmt.Printf("\n%s\n", separator)
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
|
||||||
|
fmt.Printf("Entry: %s\n\n", entryID)
|
||||||
|
|
||||||
|
events, err := inspector.ListSchedulerEnqueueEvents(
|
||||||
|
entryID, asynq.PageSize(pageSize), asynq.Page(pageNum))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if len(events) == 0 {
|
||||||
|
fmt.Printf("No scheduler enqueue events found for entry: %s\n", entryID)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
cols := []string{"TaskID", "EnqueuedAt"}
|
||||||
|
printRows := func(w io.Writer, tmpl string) {
|
||||||
|
for _, e := range events {
|
||||||
|
fmt.Fprintf(w, tmpl, e.TaskID, e.EnqueuedAt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
printTable(cols, printRows)
|
||||||
|
}
|
||||||
|
}
|
||||||
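The `cron ls` and `cron history` subcommands read entries that a Scheduler registers in Redis. A sketch of registering such an entry, assuming the Scheduler API exported by the `asynq` package at this version (cron spec and task type are illustrative):

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: "127.0.0.1:6379"},
		nil, // default scheduler options
	)

	// Enqueue a "report:generate" task every night at 2am.
	entryID, err := scheduler.Register("0 2 * * *", asynq.NewTask("report:generate", nil))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("registered scheduler entry: %s", entryID)

	// Run blocks and enqueues tasks on schedule; `asynq cron ls` will now
	// show this entry, and `asynq cron history <entry id>` its enqueue events.
	if err := scheduler.Run(); err != nil {
		log.Fatal(err)
	}
}
```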
@@ -1,73 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
// delCmd represents the del command
|
|
||||||
var delCmd = &cobra.Command{
|
|
||||||
Use: "del [task id]",
|
|
||||||
Short: "Deletes a task given an identifier",
|
|
||||||
Long: `Del (asynq del) will delete a task given an identifier.
|
|
||||||
|
|
||||||
The command takes one argument which specifies the task to delete.
|
|
||||||
The task should be in either scheduled, retry or dead state.
|
|
||||||
Identifier for a task should be obtained by running "asynq ls" command.
|
|
||||||
|
|
||||||
Example: asynq enq d:1575732274:bnogo8gt6toe23vhef0g`,
|
|
||||||
Args: cobra.ExactArgs(1),
|
|
||||||
Run: del,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(delCmd)
|
|
||||||
|
|
||||||
// Here you will define your flags and configuration settings.
|
|
||||||
|
|
||||||
// Cobra supports Persistent Flags which will work for this command
|
|
||||||
// and all subcommands, e.g.:
|
|
||||||
// delCmd.PersistentFlags().String("foo", "", "A help for foo")
|
|
||||||
|
|
||||||
// Cobra supports local flags which will only run when this command
|
|
||||||
// is called directly, e.g.:
|
|
||||||
// delCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
|
||||||
}
|
|
||||||
|
|
||||||
func del(cmd *cobra.Command, args []string) {
|
|
||||||
id, score, qtype, err := parseQueryID(args[0])
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
r := rdb.NewRDB(redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
}))
|
|
||||||
switch qtype {
|
|
||||||
case "s":
|
|
||||||
err = r.DeleteScheduledTask(id, score)
|
|
||||||
case "r":
|
|
||||||
err = r.DeleteRetryTask(id, score)
|
|
||||||
case "d":
|
|
||||||
err = r.DeleteDeadTask(id, score)
|
|
||||||
default:
|
|
||||||
fmt.Println("invalid argument")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fmt.Printf("Successfully deleted %v\n", args[0])
|
|
||||||
}
|
|
||||||
@@ -1,71 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
var delallValidArgs = []string{"scheduled", "retry", "dead"}
|
|
||||||
|
|
||||||
// delallCmd represents the delall command
|
|
||||||
var delallCmd = &cobra.Command{
|
|
||||||
Use: "delall [state]",
|
|
||||||
Short: "Deletes all tasks in the specified state",
|
|
||||||
Long: `Delall (asynq delall) will delete all tasks in the specified state.
|
|
||||||
|
|
||||||
The argument should be one of "scheduled", "retry", or "dead".
|
|
||||||
|
|
||||||
Example: asynq delall dead -> Deletes all dead tasks`,
|
|
||||||
ValidArgs: delallValidArgs,
|
|
||||||
Args: cobra.ExactValidArgs(1),
|
|
||||||
Run: delall,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(delallCmd)
|
|
||||||
|
|
||||||
// Here you will define your flags and configuration settings.
|
|
||||||
|
|
||||||
// Cobra supports Persistent Flags which will work for this command
|
|
||||||
// and all subcommands, e.g.:
|
|
||||||
// delallCmd.PersistentFlags().String("foo", "", "A help for foo")
|
|
||||||
|
|
||||||
// Cobra supports local flags which will only run when this command
|
|
||||||
// is called directly, e.g.:
|
|
||||||
// delallCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
|
||||||
}
|
|
||||||
|
|
||||||
func delall(cmd *cobra.Command, args []string) {
|
|
||||||
c := redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
})
|
|
||||||
r := rdb.NewRDB(c)
|
|
||||||
var err error
|
|
||||||
switch args[0] {
|
|
||||||
case "scheduled":
|
|
||||||
err = r.DeleteAllScheduledTasks()
|
|
||||||
case "retry":
|
|
||||||
err = r.DeleteAllRetryTasks()
|
|
||||||
case "dead":
|
|
||||||
err = r.DeleteAllDeadTasks()
|
|
||||||
default:
|
|
||||||
fmt.Printf("error: `asynq delall [state]` only accepts %v as the argument.\n", delallValidArgs)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fmt.Printf("Deleted all tasks in %q state\n", args[0])
|
|
||||||
}
|
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
// enqCmd represents the enq command
|
|
||||||
var enqCmd = &cobra.Command{
|
|
||||||
Use: "enq [task id]",
|
|
||||||
Short: "Enqueues a task given an identifier",
|
|
||||||
Long: `Enq (asynq enq) will enqueue a task given an identifier.
|
|
||||||
|
|
||||||
The command takes one argument which specifies the task to enqueue.
|
|
||||||
The task should be in either scheduled, retry or dead state.
|
|
||||||
Identifier for a task should be obtained by running "asynq ls" command.
|
|
||||||
|
|
||||||
The task enqueued by this command will be processed as soon as the task
|
|
||||||
gets dequeued by a processor.
|
|
||||||
|
|
||||||
Example: asynq enq d:1575732274:bnogo8gt6toe23vhef0g`,
|
|
||||||
Args: cobra.ExactArgs(1),
|
|
||||||
Run: enq,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(enqCmd)
|
|
||||||
|
|
||||||
// Here you will define your flags and configuration settings.
|
|
||||||
|
|
||||||
// Cobra supports Persistent Flags which will work for this command
|
|
||||||
// and all subcommands, e.g.:
|
|
||||||
// enqCmd.PersistentFlags().String("foo", "", "A help for foo")
|
|
||||||
|
|
||||||
// Cobra supports local flags which will only run when this command
|
|
||||||
// is called directly, e.g.:
|
|
||||||
// enqCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
|
||||||
}
|
|
||||||
|
|
||||||
func enq(cmd *cobra.Command, args []string) {
|
|
||||||
id, score, qtype, err := parseQueryID(args[0])
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
r := rdb.NewRDB(redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
}))
|
|
||||||
switch qtype {
|
|
||||||
case "s":
|
|
||||||
err = r.EnqueueScheduledTask(id, score)
|
|
||||||
case "r":
|
|
||||||
err = r.EnqueueRetryTask(id, score)
|
|
||||||
case "d":
|
|
||||||
err = r.EnqueueDeadTask(id, score)
|
|
||||||
default:
|
|
||||||
fmt.Println("invalid argument")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fmt.Printf("Successfully enqueued %v\n", args[0])
|
|
||||||
}
|
|
||||||
@@ -1,75 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
var enqallValidArgs = []string{"scheduled", "retry", "dead"}
|
|
||||||
|
|
||||||
// enqallCmd represents the enqall command
|
|
||||||
var enqallCmd = &cobra.Command{
|
|
||||||
Use: "enqall [state]",
|
|
||||||
Short: "Enqueues all tasks in the specified state",
|
|
||||||
Long: `Enqall (asynq enqall) will enqueue all tasks in the specified state.
|
|
||||||
|
|
||||||
The argument should be one of "scheduled", "retry", or "dead".
|
|
||||||
|
|
||||||
The tasks enqueued by this command will be processed as soon as they
get dequeued by a processor.
|
|
||||||
|
|
||||||
Example: asynq enqall dead -> Enqueues all dead tasks`,
|
|
||||||
ValidArgs: enqallValidArgs,
|
|
||||||
Args: cobra.ExactValidArgs(1),
|
|
||||||
Run: enqall,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(enqallCmd)
|
|
||||||
|
|
||||||
// Here you will define your flags and configuration settings.
|
|
||||||
|
|
||||||
// Cobra supports Persistent Flags which will work for this command
|
|
||||||
// and all subcommands, e.g.:
|
|
||||||
// enqallCmd.PersistentFlags().String("foo", "", "A help for foo")
|
|
||||||
|
|
||||||
// Cobra supports local flags which will only run when this command
|
|
||||||
// is called directly, e.g.:
|
|
||||||
// enqallCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
|
||||||
}
|
|
||||||
|
|
||||||
func enqall(cmd *cobra.Command, args []string) {
|
|
||||||
c := redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
})
|
|
||||||
r := rdb.NewRDB(c)
|
|
||||||
var n int64
|
|
||||||
var err error
|
|
||||||
switch args[0] {
|
|
||||||
case "scheduled":
|
|
||||||
n, err = r.EnqueueAllScheduledTasks()
|
|
||||||
case "retry":
|
|
||||||
n, err = r.EnqueueAllRetryTasks()
|
|
||||||
case "dead":
|
|
||||||
n, err = r.EnqueueAllDeadTasks()
|
|
||||||
default:
|
|
||||||
fmt.Printf("error: `asynq enqall [state]` only accepts %v as the argument.\n", enqallValidArgs)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fmt.Printf("Enqueued %d tasks in %q state\n", n, args[0])
|
|
||||||
}
|
|
||||||
@@ -1,71 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"text/tabwriter"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
var days int
|
|
||||||
|
|
||||||
// historyCmd represents the history command
|
|
||||||
var historyCmd = &cobra.Command{
|
|
||||||
Use: "history",
|
|
||||||
Short: "Shows historical aggregate data",
|
|
||||||
Long: `History (asynq history) will show the number of processed and failed tasks
|
|
||||||
from the last x days.
|
|
||||||
|
|
||||||
By default, it will show the data from the last 10 days.
|
|
||||||
|
|
||||||
Example: asynq history -x=30 -> Shows stats from the last 30 days`,
|
|
||||||
Args: cobra.NoArgs,
|
|
||||||
Run: history,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(historyCmd)
|
|
||||||
historyCmd.Flags().IntVarP(&days, "days", "x", 10, "show data from last x days")
|
|
||||||
}
|
|
||||||
|
|
||||||
func history(cmd *cobra.Command, args []string) {
|
|
||||||
c := redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
})
|
|
||||||
r := rdb.NewRDB(c)
|
|
||||||
|
|
||||||
stats, err := r.HistoricalStats(days)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
printDailyStats(stats)
|
|
||||||
}
|
|
||||||
|
|
||||||
func printDailyStats(stats []*rdb.DailyStats) {
|
|
||||||
format := strings.Repeat("%v\t", 4) + "\n"
|
|
||||||
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
|
|
||||||
fmt.Fprintf(tw, format, "Date (UTC)", "Processed", "Failed", "Error Rate")
|
|
||||||
fmt.Fprintf(tw, format, "----------", "---------", "------", "----------")
|
|
||||||
for _, s := range stats {
|
|
||||||
var errrate string
|
|
||||||
if s.Processed == 0 {
|
|
||||||
errrate = "N/A"
|
|
||||||
} else {
|
|
||||||
errrate = fmt.Sprintf("%.2f%%", float64(s.Failed)/float64(s.Processed)*100)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(tw, format, s.Time.Format("2006-01-02"), s.Processed, s.Failed, errrate)
|
|
||||||
}
|
|
||||||
tw.Flush()
|
|
||||||
}
|
|
||||||
@@ -1,72 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
// killCmd represents the kill command
|
|
||||||
var killCmd = &cobra.Command{
|
|
||||||
Use: "kill [task id]",
|
|
||||||
Short: "Kills a task given an identifier",
|
|
||||||
Long: `Kill (asynq kill) will put a task in dead state given an identifier.
|
|
||||||
|
|
||||||
The command takes one argument which specifies the task to kill.
|
|
||||||
The task should be in either scheduled or retry state.
|
|
||||||
Identifier for a task should be obtained by running "asynq ls" command.
|
|
||||||
|
|
||||||
Example: asynq kill r:1575732274:bnogo8gt6toe23vhef0g`,
|
|
||||||
Args: cobra.ExactArgs(1),
|
|
||||||
Run: kill,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(killCmd)
|
|
||||||
|
|
||||||
// Here you will define your flags and configuration settings.
|
|
||||||
|
|
||||||
// Cobra supports Persistent Flags which will work for this command
|
|
||||||
// and all subcommands, e.g.:
|
|
||||||
// killCmd.PersistentFlags().String("foo", "", "A help for foo")
|
|
||||||
|
|
||||||
// Cobra supports local flags which will only run when this command
|
|
||||||
// is called directly, e.g.:
|
|
||||||
// killCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
|
||||||
}
|
|
||||||
|
|
||||||
func kill(cmd *cobra.Command, args []string) {
|
|
||||||
id, score, qtype, err := parseQueryID(args[0])
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
r := rdb.NewRDB(redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
}))
|
|
||||||
switch qtype {
|
|
||||||
case "s":
|
|
||||||
err = r.KillScheduledTask(id, score)
|
|
||||||
case "r":
|
|
||||||
err = r.KillRetryTask(id, score)
|
|
||||||
default:
|
|
||||||
fmt.Println("invalid argument")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fmt.Printf("Successfully killed %v\n", args[0])
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -1,70 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
var killallValidArgs = []string{"scheduled", "retry"}
|
|
||||||
|
|
||||||
// killallCmd represents the killall command
|
|
||||||
var killallCmd = &cobra.Command{
|
|
||||||
Use: "killall [state]",
|
|
||||||
Short: "Kills all tasks in the specified state",
|
|
||||||
Long: `Killall (asynq killall) will update all tasks from the specified state to dead state.
|
|
||||||
|
|
||||||
The argument should be either "scheduled" or "retry".
|
|
||||||
|
|
||||||
Example: asynq killall retry -> Update all retry tasks to dead tasks`,
|
|
||||||
ValidArgs: killallValidArgs,
|
|
||||||
Args: cobra.ExactValidArgs(1),
|
|
||||||
Run: killall,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(killallCmd)
|
|
||||||
|
|
||||||
// Here you will define your flags and configuration settings.
|
|
||||||
|
|
||||||
// Cobra supports Persistent Flags which will work for this command
|
|
||||||
// and all subcommands, e.g.:
|
|
||||||
// killallCmd.PersistentFlags().String("foo", "", "A help for foo")
|
|
||||||
|
|
||||||
// Cobra supports local flags which will only run when this command
|
|
||||||
// is called directly, e.g.:
|
|
||||||
// killallCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
|
||||||
}
|
|
||||||
|
|
||||||
func killall(cmd *cobra.Command, args []string) {
|
|
||||||
c := redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
})
|
|
||||||
r := rdb.NewRDB(c)
|
|
||||||
var n int64
|
|
||||||
var err error
|
|
||||||
switch args[0] {
|
|
||||||
case "scheduled":
|
|
||||||
n, err = r.KillAllScheduledTasks()
|
|
||||||
case "retry":
|
|
||||||
n, err = r.KillAllRetryTasks()
|
|
||||||
default:
|
|
||||||
fmt.Printf("error: `asynq killall [state]` only accepts %v as the argument.\n", killallValidArgs)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fmt.Printf("Successfully updated %d tasks to \"dead\" state\n", n)
|
|
||||||
}
|
|
||||||
@@ -1,229 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/rs/xid"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
var lsValidArgs = []string{"enqueued", "inprogress", "scheduled", "retry", "dead"}
|
|
||||||
|
|
||||||
// lsCmd represents the ls command
|
|
||||||
var lsCmd = &cobra.Command{
|
|
||||||
Use: "ls [state]",
|
|
||||||
Short: "Lists tasks in the specified state",
|
|
||||||
Long: `Ls (asynq ls) will list all tasks in the specified state in a table format.
|
|
||||||
|
|
||||||
The command takes one argument which specifies the state of tasks.
|
|
||||||
The argument value should be one of "enqueued", "inprogress", "scheduled",
|
|
||||||
"retry", or "dead".
|
|
||||||
|
|
||||||
Example:
|
|
||||||
asynq ls dead -> Lists all tasks in dead state
|
|
||||||
|
|
||||||
Enqueued tasks requires a queue name after ":"
|
|
||||||
Example:
|
|
||||||
asynq ls enqueued:default -> List tasks from default queue
|
|
||||||
asynq ls enqueued:critical -> List tasks from critical queue
|
|
||||||
`,
|
|
||||||
Args: cobra.ExactValidArgs(1),
|
|
||||||
Run: ls,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flags
|
|
||||||
var pageSize int
|
|
||||||
var pageNum int
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(lsCmd)
|
|
||||||
lsCmd.Flags().IntVar(&pageSize, "size", 30, "page size")
|
|
||||||
lsCmd.Flags().IntVar(&pageNum, "page", 0, "page number - zero indexed (default 0)")
|
|
||||||
}
|
|
||||||
|
|
||||||
func ls(cmd *cobra.Command, args []string) {
|
|
||||||
if pageSize < 0 {
|
|
||||||
fmt.Println("page size cannot be negative.")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if pageNum < 0 {
|
|
||||||
fmt.Println("page number cannot be negative.")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
c := redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
})
|
|
||||||
r := rdb.NewRDB(c)
|
|
||||||
parts := strings.Split(args[0], ":")
|
|
||||||
switch parts[0] {
|
|
||||||
case "enqueued":
|
|
||||||
if len(parts) != 2 {
|
|
||||||
fmt.Printf("error: Missing queue name\n`asynq ls enqueued:[queue name]`\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
listEnqueued(r, parts[1])
|
|
||||||
case "inprogress":
|
|
||||||
listInProgress(r)
|
|
||||||
case "scheduled":
|
|
||||||
listScheduled(r)
|
|
||||||
case "retry":
|
|
||||||
listRetry(r)
|
|
||||||
case "dead":
|
|
||||||
listDead(r)
|
|
||||||
default:
|
|
||||||
fmt.Printf("error: `asynq ls [state]`\nonly accepts %v as the argument.\n", lsValidArgs)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// queryID returns an identifier used for "enq" command.
|
|
||||||
// score is the zset score and queryType should be one
|
|
||||||
// of "s", "r" or "d" (scheduled, retry, dead respectively).
|
|
||||||
func queryID(id xid.ID, score int64, qtype string) string {
|
|
||||||
const format = "%v:%v:%v"
|
|
||||||
return fmt.Sprintf(format, qtype, score, id)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseQueryID is a reverse operation of queryID function.
|
|
||||||
// It takes a queryID and return each part of id with proper
|
|
||||||
// type if valid, otherwise it reports an error.
|
|
||||||
func parseQueryID(queryID string) (id xid.ID, score int64, qtype string, err error) {
|
|
||||||
parts := strings.Split(queryID, ":")
|
|
||||||
if len(parts) != 3 {
|
|
||||||
return xid.NilID(), 0, "", fmt.Errorf("invalid id")
|
|
||||||
}
|
|
||||||
id, err = xid.FromString(parts[2])
|
|
||||||
if err != nil {
|
|
||||||
return xid.NilID(), 0, "", fmt.Errorf("invalid id")
|
|
||||||
}
|
|
||||||
score, err = strconv.ParseInt(parts[1], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return xid.NilID(), 0, "", fmt.Errorf("invalid id")
|
|
||||||
}
|
|
||||||
qtype = parts[0]
|
|
||||||
if len(qtype) != 1 || !strings.Contains("srd", qtype) {
|
|
||||||
return xid.NilID(), 0, "", fmt.Errorf("invalid id")
|
|
||||||
}
|
|
||||||
return id, score, qtype, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
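The identifiers accepted by the removed `enq`, `del`, and `kill` commands are produced by `queryID` above in the form `<type>:<score>:<xid>`, e.g. `r:1575732274:bnogo8gt6toe23vhef0g`. A small self-contained round trip of that format:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/rs/xid"
)

func main() {
	// Encode the same way queryID does: "<qtype>:<score>:<id>".
	id := xid.New()
	encoded := fmt.Sprintf("%v:%v:%v", "r", 1575732274, id)

	// Decode the way parseQueryID does.
	parts := strings.Split(encoded, ":")
	score, _ := strconv.ParseInt(parts[1], 10, 64)
	parsed, err := xid.FromString(parts[2])
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded, score, parsed == id) // e.g. "r:1575732274:... 1575732274 true"
}
```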
func listEnqueued(r *rdb.RDB, qname string) {
|
|
||||||
tasks, err := r.ListEnqueued(qname, rdb.Pagination{Size: pageSize, Page: pageNum})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if len(tasks) == 0 {
|
|
||||||
fmt.Printf("No enqueued tasks in %q queue\n", qname)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cols := []string{"ID", "Type", "Payload", "Queue"}
|
|
||||||
printRows := func(w io.Writer, tmpl string) {
|
|
||||||
for _, t := range tasks {
|
|
||||||
fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload, t.Queue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
printTable(cols, printRows)
|
|
||||||
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
|
|
||||||
}
|
|
||||||
|
|
||||||
func listInProgress(r *rdb.RDB) {
|
|
||||||
tasks, err := r.ListInProgress(rdb.Pagination{Size: pageSize, Page: pageNum})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if len(tasks) == 0 {
|
|
||||||
fmt.Println("No in-progress tasks")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cols := []string{"ID", "Type", "Payload"}
|
|
||||||
printRows := func(w io.Writer, tmpl string) {
|
|
||||||
for _, t := range tasks {
|
|
||||||
fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
printTable(cols, printRows)
|
|
||||||
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
|
|
||||||
}
|
|
||||||
|
|
||||||
func listScheduled(r *rdb.RDB) {
|
|
||||||
tasks, err := r.ListScheduled(rdb.Pagination{Size: pageSize, Page: pageNum})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if len(tasks) == 0 {
|
|
||||||
fmt.Println("No scheduled tasks")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cols := []string{"ID", "Type", "Payload", "Process In", "Queue"}
|
|
||||||
printRows := func(w io.Writer, tmpl string) {
|
|
||||||
for _, t := range tasks {
|
|
||||||
processIn := fmt.Sprintf("%.0f seconds", t.ProcessAt.Sub(time.Now()).Seconds())
|
|
||||||
fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "s"), t.Type, t.Payload, processIn, t.Queue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
printTable(cols, printRows)
|
|
||||||
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
|
|
||||||
}
|
|
||||||
|
|
||||||
func listRetry(r *rdb.RDB) {
|
|
||||||
tasks, err := r.ListRetry(rdb.Pagination{Size: pageSize, Page: pageNum})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if len(tasks) == 0 {
|
|
||||||
fmt.Println("No retry tasks")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cols := []string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Retried", "Max Retry", "Queue"}
|
|
||||||
printRows := func(w io.Writer, tmpl string) {
|
|
||||||
for _, t := range tasks {
|
|
||||||
var nextRetry string
|
|
||||||
if d := t.ProcessAt.Sub(time.Now()); d > 0 {
|
|
||||||
nextRetry = fmt.Sprintf("in %v", d.Round(time.Second))
|
|
||||||
} else {
|
|
||||||
nextRetry = "right now"
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "r"), t.Type, t.Payload, nextRetry, t.ErrorMsg, t.Retried, t.Retry, t.Queue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
printTable(cols, printRows)
|
|
||||||
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
|
|
||||||
}
|
|
||||||
|
|
||||||
func listDead(r *rdb.RDB) {
|
|
||||||
tasks, err := r.ListDead(rdb.Pagination{Size: pageSize, Page: pageNum})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if len(tasks) == 0 {
|
|
||||||
fmt.Println("No dead tasks")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cols := []string{"ID", "Type", "Payload", "Last Failed", "Last Error", "Queue"}
|
|
||||||
printRows := func(w io.Writer, tmpl string) {
|
|
||||||
for _, t := range tasks {
|
|
||||||
fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "d"), t.Type, t.Payload, t.LastFailedAt, t.ErrorMsg, t.Queue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
printTable(cols, printRows)
|
|
||||||
fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
|
|
||||||
}
|
|
||||||
tools/asynq/cmd/migrate.go (new file, 404 lines)
@@ -0,0 +1,404 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-redis/redis/v7"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/hibiken/asynq/internal/base"
|
||||||
|
"github.com/hibiken/asynq/internal/errors"
|
||||||
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
// migrateCmd represents the migrate command.
|
||||||
|
var migrateCmd = &cobra.Command{
|
||||||
|
Use: "migrate",
|
||||||
|
Short: fmt.Sprintf("Migrate existing tasks and queues to be asynq%s compatible", base.Version),
|
||||||
|
Long: `Migrate (asynq migrate) will migrate existing tasks and queues in redis to be compatible with the latest version of asynq.
|
||||||
|
`,
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
Run: migrate,
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(migrateCmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
func backupKey(key string) string {
|
||||||
|
return fmt.Sprintf("%s:backup", key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func renameKeyAsBackup(c redis.UniversalClient, key string) error {
|
||||||
|
if c.Exists(key).Val() == 0 {
|
||||||
|
return nil // key doesn't exist; no-op
|
||||||
|
}
|
||||||
|
return c.Rename(key, backupKey(key)).Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func failIfError(err error, msg string) {
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %s: %v\n", msg, err)
|
||||||
|
fmt.Println("*** Please report this issue at https://github.com/hibiken/asynq/issues ***")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func logIfError(err error, msg string) {
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("warning: %s: %v\n", msg, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func migrate(cmd *cobra.Command, args []string) {
|
||||||
|
r := createRDB()
|
||||||
|
queues, err := r.AllQueues()
|
||||||
|
failIfError(err, "Failed to get queue names")
|
||||||
|
|
||||||
|
// ---------------------------------------------
|
||||||
|
// Pre-check: Ensure no active servers, tasks.
|
||||||
|
// ---------------------------------------------
|
||||||
|
srvs, err := r.ListServers()
|
||||||
|
failIfError(err, "Failed to get server infos")
|
||||||
|
if len(srvs) > 0 {
|
||||||
|
fmt.Println("(error): Server(s) still running. Please ensure that no asynq servers are running when runnning migrate command.")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
for _, qname := range queues {
|
||||||
|
stats, err := r.CurrentStats(qname)
|
||||||
|
failIfError(err, "Failed to get stats")
|
||||||
|
if stats.Active > 0 {
|
||||||
|
fmt.Printf("(error): %d active tasks found. Please ensure that no active tasks exist when running migrate command.\n", stats.Active)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------
|
||||||
|
// Rename pending key
|
||||||
|
// ---------------------------------------------
|
||||||
|
fmt.Print("Renaming pending keys...")
|
||||||
|
for _, qname := range queues {
|
||||||
|
oldKey := fmt.Sprintf("asynq:{%s}", qname)
|
||||||
|
if r.Client().Exists(oldKey).Val() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newKey := base.PendingKey(qname)
|
||||||
|
err := r.Client().Rename(oldKey, newKey).Err()
|
||||||
|
failIfError(err, "Failed to rename key")
|
||||||
|
}
|
||||||
|
fmt.Print("Done\n")
|
||||||
|
|
||||||
|
// ---------------------------------------------
|
||||||
|
// Rename keys as backup
|
||||||
|
// ---------------------------------------------
|
||||||
|
fmt.Print("Renaming keys for backup...")
|
||||||
|
for _, qname := range queues {
|
||||||
|
keys := []string{
|
||||||
|
base.ActiveKey(qname),
|
||||||
|
base.PendingKey(qname),
|
||||||
|
base.ScheduledKey(qname),
|
||||||
|
base.RetryKey(qname),
|
||||||
|
base.ArchivedKey(qname),
|
||||||
|
}
|
||||||
|
for _, key := range keys {
|
||||||
|
err := renameKeyAsBackup(r.Client(), key)
|
||||||
|
failIfError(err, fmt.Sprintf("Failed to rename key %q for backup", key))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Print("Done\n")
|
||||||
|
|
||||||
|
// ---------------------------------------------
|
||||||
|
// Update to new schema
|
||||||
|
// ---------------------------------------------
|
||||||
|
fmt.Print("Updating to new schema...")
|
||||||
|
for _, qname := range queues {
|
||||||
|
updatePendingMessages(r, qname)
|
||||||
|
updateZSetMessages(r.Client(), base.ScheduledKey(qname), "scheduled")
|
||||||
|
updateZSetMessages(r.Client(), base.RetryKey(qname), "retry")
|
||||||
|
updateZSetMessages(r.Client(), base.ArchivedKey(qname), "archived")
|
||||||
|
}
|
||||||
|
fmt.Print("Done\n")
|
||||||
|
|
||||||
|
// ---------------------------------------------
|
||||||
|
// Delete backup keys
|
||||||
|
// ---------------------------------------------
|
||||||
|
fmt.Print("Deleting backup keys...")
|
||||||
|
for _, qname := range queues {
|
||||||
|
keys := []string{
|
||||||
|
backupKey(base.ActiveKey(qname)),
|
||||||
|
backupKey(base.PendingKey(qname)),
|
||||||
|
backupKey(base.ScheduledKey(qname)),
|
||||||
|
backupKey(base.RetryKey(qname)),
|
||||||
|
backupKey(base.ArchivedKey(qname)),
|
||||||
|
}
|
||||||
|
for _, key := range keys {
|
||||||
|
err := r.Client().Del(key).Err()
|
||||||
|
failIfError(err, "Failed to delete backup key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Print("Done\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func UnmarshalOldMessage(encoded string) (*base.TaskMessage, error) {
|
||||||
|
oldMsg, err := DecodeMessage(encoded)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
payload, err := json.Marshal(oldMsg.Payload)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not marshal payload: %v", err)
|
||||||
|
}
|
||||||
|
return &base.TaskMessage{
|
||||||
|
Type: oldMsg.Type,
|
||||||
|
Payload: payload,
|
||||||
|
ID: oldMsg.ID,
|
||||||
|
Queue: oldMsg.Queue,
|
||||||
|
Retry: oldMsg.Retry,
|
||||||
|
Retried: oldMsg.Retried,
|
||||||
|
ErrorMsg: oldMsg.ErrorMsg,
|
||||||
|
LastFailedAt: 0,
|
||||||
|
Timeout: oldMsg.Timeout,
|
||||||
|
Deadline: oldMsg.Deadline,
|
||||||
|
UniqueKey: oldMsg.UniqueKey,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TaskMessage from v0.17
|
||||||
|
type OldTaskMessage struct {
|
||||||
|
// Type indicates the kind of the task to be performed.
|
||||||
|
Type string
|
||||||
|
|
||||||
|
// Payload holds data needed to process the task.
|
||||||
|
Payload map[string]interface{}
|
||||||
|
|
||||||
|
// ID is a unique identifier for each task.
|
||||||
|
ID uuid.UUID
|
||||||
|
|
||||||
|
// Queue is a name this message should be enqueued to.
|
||||||
|
Queue string
|
||||||
|
|
||||||
|
// Retry is the max number of retry for this task.
|
||||||
|
Retry int
|
||||||
|
|
||||||
|
// Retried is the number of times we've retried this task so far.
|
||||||
|
Retried int
|
||||||
|
|
||||||
|
// ErrorMsg holds the error message from the last failure.
|
||||||
|
ErrorMsg string
|
||||||
|
|
||||||
|
// Timeout specifies timeout in seconds.
|
||||||
|
// If task processing doesn't complete within the timeout, the task will be retried
|
||||||
|
// if retry count is remaining. Otherwise it will be moved to the archive.
|
||||||
|
//
|
||||||
|
// Use zero to indicate no timeout.
|
||||||
|
Timeout int64
|
||||||
|
|
||||||
|
// Deadline specifies the deadline for the task in Unix time,
|
||||||
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
|
// If task processing doesn't complete before the deadline, the task will be retried
|
||||||
|
// if retry count is remaining. Otherwise it will be moved to the archive.
|
||||||
|
//
|
||||||
|
// Use zero to indicate no deadline.
|
||||||
|
Deadline int64
|
||||||
|
|
||||||
|
// UniqueKey holds the redis key used for uniqueness lock for this task.
|
||||||
|
//
|
||||||
|
// Empty string indicates that no uniqueness lock was used.
|
||||||
|
UniqueKey string
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeMessage unmarshals the given encoded string and returns a decoded task message.
|
||||||
|
// Code from v0.17.
|
||||||
|
func DecodeMessage(s string) (*OldTaskMessage, error) {
|
||||||
|
d := json.NewDecoder(strings.NewReader(s))
|
||||||
|
d.UseNumber()
|
||||||
|
var msg OldTaskMessage
|
||||||
|
if err := d.Decode(&msg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &msg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func updatePendingMessages(r *rdb.RDB, qname string) {
|
||||||
|
data, err := r.Client().LRange(backupKey(base.PendingKey(qname)), 0, -1).Result()
|
||||||
|
failIfError(err, "Failed to read backup pending key")
|
||||||
|
|
||||||
|
for _, s := range data {
|
||||||
|
msg, err := UnmarshalOldMessage(s)
|
||||||
|
failIfError(err, "Failed to unmarshal message")
|
||||||
|
|
||||||
|
if msg.UniqueKey != "" {
|
||||||
|
ttl, err := r.Client().TTL(msg.UniqueKey).Result()
|
||||||
|
failIfError(err, "Failed to get ttl")
|
||||||
|
|
||||||
|
if ttl > 0 {
|
||||||
|
err = r.Client().Del(msg.UniqueKey).Err()
|
||||||
|
logIfError(err, "Failed to delete unique key")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Regenerate unique key.
|
||||||
|
msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
|
||||||
|
if ttl > 0 {
|
||||||
|
err = r.EnqueueUnique(msg, ttl)
|
||||||
|
} else {
|
||||||
|
err = r.Enqueue(msg)
|
||||||
|
}
|
||||||
|
failIfError(err, "Failed to enqueue message")
|
||||||
|
|
||||||
|
} else {
|
||||||
|
err := r.Enqueue(msg)
|
||||||
|
failIfError(err, "Failed to enqueue message")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// KEYS[1] -> asynq:{<qname>}:t:<task_id>
|
||||||
|
// KEYS[2] -> asynq:{<qname>}:scheduled
|
||||||
|
// ARGV[1] -> task message data
|
||||||
|
// ARGV[2] -> zset score
|
||||||
|
// ARGV[3] -> task ID
|
||||||
|
// ARGV[4] -> task timeout in seconds (0 if not timeout)
|
||||||
|
// ARGV[5] -> task deadline in unix time (0 if no deadline)
|
||||||
|
// ARGV[6] -> task state (e.g. "retry", "archived")
|
||||||
|
var taskZAddCmd = redis.NewScript(`
|
||||||
|
redis.call("HSET", KEYS[1],
|
||||||
|
"msg", ARGV[1],
|
||||||
|
"state", ARGV[6],
|
||||||
|
"timeout", ARGV[4],
|
||||||
|
"deadline", ARGV[5])
|
||||||
|
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[3])
|
||||||
|
return 1
|
||||||
|
`)
|
||||||
|
|
||||||
|
// ZAddTask adds task to zset.
|
||||||
|
func ZAddTask(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string) error {
|
||||||
|
// Special case; LastFailedAt field is new so assign a value inferred from zscore.
|
||||||
|
if state == "archived" {
|
||||||
|
msg.LastFailedAt = int64(score)
|
||||||
|
}
|
||||||
|
|
||||||
|
encoded, err := base.EncodeMessage(msg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := c.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
keys := []string{
|
||||||
|
base.TaskKey(msg.Queue, msg.ID.String()),
|
||||||
|
key,
|
||||||
|
}
|
||||||
|
argv := []interface{}{
|
||||||
|
encoded,
|
||||||
|
score,
|
||||||
|
msg.ID.String(),
|
||||||
|
msg.Timeout,
|
||||||
|
msg.Deadline,
|
||||||
|
state,
|
||||||
|
}
|
||||||
|
return taskZAddCmd.Run(c, keys, argv...).Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// KEYS[1] -> unique key
|
||||||
|
// KEYS[2] -> asynq:{<qname>}:t:<task_id>
|
||||||
|
// KEYS[3] -> zset key (e.g. asynq:{<qname>}:scheduled)
|
||||||
|
// --
|
||||||
|
// ARGV[1] -> task ID
|
||||||
|
// ARGV[2] -> uniqueness lock TTL
|
||||||
|
// ARGV[3] -> score (process_at timestamp)
|
||||||
|
// ARGV[4] -> task message
|
||||||
|
// ARGV[5] -> task timeout in seconds (0 if not timeout)
|
||||||
|
// ARGV[6] -> task deadline in unix time (0 if no deadline)
|
||||||
|
// ARGV[7] -> task state (oneof "scheduled", "retry", "archived")
|
||||||
|
var taskZAddUniqueCmd = redis.NewScript(`
|
||||||
|
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
|
||||||
|
if not ok then
|
||||||
|
return 0
|
||||||
|
end
|
||||||
|
redis.call("HSET", KEYS[2],
|
||||||
|
"msg", ARGV[4],
|
||||||
|
"state", ARGV[7],
|
||||||
|
"timeout", ARGV[5],
|
||||||
|
"deadline", ARGV[6],
|
||||||
|
"unique_key", KEYS[1])
|
||||||
|
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1])
|
||||||
|
return 1
|
||||||
|
`)
|
||||||
|
|
||||||
|
// ZAddTaskUnique adds the task to the given zset to be processed in the future if the uniqueness lock can be acquired.
|
||||||
|
// It returns ErrDuplicateTask if the lock cannot be acquired.
|
||||||
|
func ZAddTaskUnique(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string, ttl time.Duration) error {
|
||||||
|
encoded, err := base.EncodeMessage(msg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := c.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
keys := []string{
|
||||||
|
msg.UniqueKey,
|
||||||
|
base.TaskKey(msg.Queue, msg.ID.String()),
|
||||||
|
key,
|
||||||
|
}
|
||||||
|
argv := []interface{}{
|
||||||
|
msg.ID.String(),
|
||||||
|
int(ttl.Seconds()),
|
||||||
|
score,
|
||||||
|
encoded,
|
||||||
|
msg.Timeout,
|
||||||
|
msg.Deadline,
|
||||||
|
state,
|
||||||
|
}
|
||||||
|
res, err := taskZAddUniqueCmd.Run(c, keys, argv...).Result()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
n, ok := res.(int64)
|
||||||
|
if !ok {
|
||||||
|
return errors.E(errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
|
||||||
|
}
|
||||||
|
if n == 0 {
|
||||||
|
return errors.E(errors.AlreadyExists, errors.ErrDuplicateTask)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func updateZSetMessages(c redis.UniversalClient, key, state string) {
|
||||||
|
zs, err := c.ZRangeWithScores(backupKey(key), 0, -1).Result()
|
||||||
|
failIfError(err, "Failed to read")
|
||||||
|
|
||||||
|
for _, z := range zs {
|
||||||
|
msg, err := UnmarshalOldMessage(z.Member.(string))
|
||||||
|
failIfError(err, "Failed to unmarshal message")
|
||||||
|
|
||||||
|
if msg.UniqueKey != "" {
|
||||||
|
ttl, err := c.TTL(msg.UniqueKey).Result()
|
||||||
|
failIfError(err, "Failed to get ttl")
|
||||||
|
|
||||||
|
if ttl > 0 {
|
||||||
|
err = c.Del(msg.UniqueKey).Err()
|
||||||
|
logIfError(err, "Failed to delete unique key")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Regenerate unique key.
|
||||||
|
msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
|
||||||
|
if ttl > 0 {
|
||||||
|
err = ZAddTaskUnique(c, key, msg, z.Score, state, ttl)
|
||||||
|
} else {
|
||||||
|
err = ZAddTask(c, key, msg, z.Score, state)
|
||||||
|
}
|
||||||
|
failIfError(err, "Failed to zadd message")
|
||||||
|
} else {
|
||||||
|
err := ZAddTask(c, key, msg, z.Score, state)
|
||||||
|
failIfError(err, "Failed to enqueue scheduled message")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
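The migrate command above refuses to run while servers or active tasks remain, renames the old keys, rewrites each message into the new schema, and finally deletes the backups. A typical invocation against the application's Redis instance (these are the CLI's global connection flags; values are illustrative):

    asynq migrate --uri localhost:6379 --db 0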
tools/asynq/cmd/queue.go (new file, 255 lines)
@@ -0,0 +1,255 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/fatih/color"
|
||||||
|
"github.com/hibiken/asynq"
|
||||||
|
"github.com/hibiken/asynq/internal/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
const separator = "================================================="
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(queueCmd)
|
||||||
|
queueCmd.AddCommand(queueListCmd)
|
||||||
|
queueCmd.AddCommand(queueInspectCmd)
|
||||||
|
queueCmd.AddCommand(queueHistoryCmd)
|
||||||
|
queueHistoryCmd.Flags().IntP("days", "x", 10, "show data from last x days")
|
||||||
|
|
||||||
|
queueCmd.AddCommand(queuePauseCmd)
|
||||||
|
queueCmd.AddCommand(queueUnpauseCmd)
|
||||||
|
queueCmd.AddCommand(queueRemoveCmd)
|
||||||
|
queueRemoveCmd.Flags().BoolP("force", "f", false, "remove the queue regardless of its size")
|
||||||
|
}
|
||||||
|
|
||||||
|
var queueCmd = &cobra.Command{
|
||||||
|
Use: "queue",
|
||||||
|
Short: "Manage queues",
|
||||||
|
}
|
||||||
|
|
||||||
|
var queueListCmd = &cobra.Command{
|
||||||
|
Use: "ls",
|
||||||
|
Short: "List queues",
|
||||||
|
// TODO: Use RunE instead?
|
||||||
|
Run: queueList,
|
||||||
|
}
|
||||||
|
|
||||||
|
var queueInspectCmd = &cobra.Command{
|
||||||
|
Use: "inspect QUEUE [QUEUE...]",
|
||||||
|
Short: "Display detailed information on one or more queues",
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
|
// TODO: Use RunE instead?
|
||||||
|
Run: queueInspect,
|
||||||
|
}
|
||||||
|
|
||||||
|
var queueHistoryCmd = &cobra.Command{
|
||||||
|
Use: "history QUEUE [QUEUE...]",
|
||||||
|
Short: "Display historical aggregate data from one or more queues",
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
|
Run: queueHistory,
|
||||||
|
}
|
||||||
|
|
||||||
|
var queuePauseCmd = &cobra.Command{
|
||||||
|
Use: "pause QUEUE [QUEUE...]",
|
||||||
|
Short: "Pause one or more queues",
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
|
Run: queuePause,
|
||||||
|
}
|
||||||
|
|
||||||
|
var queueUnpauseCmd = &cobra.Command{
|
||||||
|
Use: "unpause QUEUE [QUEUE...]",
|
||||||
|
Short: "Unpause one or more queues",
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
|
Run: queueUnpause,
|
||||||
|
}
|
||||||
|
|
||||||
|
var queueRemoveCmd = &cobra.Command{
|
||||||
|
Use: "rm QUEUE [QUEUE...]",
|
||||||
|
Short: "Remove one or more queues",
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
|
Run: queueRemove,
|
||||||
|
}
|
||||||
|
|
||||||
|
func queueList(cmd *cobra.Command, args []string) {
|
||||||
|
type queueInfo struct {
|
||||||
|
name string
|
||||||
|
keyslot int64
|
||||||
|
nodes []*asynq.ClusterNode
|
||||||
|
}
|
||||||
|
inspector := createInspector()
|
||||||
|
queues, err := inspector.Queues()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: Could not fetch list of queues: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
var qs []*queueInfo
|
||||||
|
for _, qname := range queues {
|
||||||
|
q := queueInfo{name: qname}
|
||||||
|
if useRedisCluster {
|
||||||
|
keyslot, err := inspector.ClusterKeySlot(qname)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Errorf("error: Could not get cluster keyslot for %q\n", qname)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
q.keyslot = keyslot
|
||||||
|
nodes, err := inspector.ClusterNodes(qname)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Errorf("error: Could not get cluster nodes for %q\n", qname)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
q.nodes = nodes
|
||||||
|
}
|
||||||
|
qs = append(qs, &q)
|
||||||
|
}
|
||||||
|
if useRedisCluster {
|
||||||
|
printTable(
|
||||||
|
[]string{"Queue", "Cluster KeySlot", "Cluster Nodes"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
for _, q := range qs {
|
||||||
|
fmt.Fprintf(w, tmpl, q.name, q.keyslot, q.nodes)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
for _, q := range qs {
|
||||||
|
fmt.Println(q.name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func queueInspect(cmd *cobra.Command, args []string) {
|
||||||
|
inspector := createInspector()
|
||||||
|
for i, qname := range args {
|
||||||
|
if i > 0 {
|
||||||
|
fmt.Printf("\n%s\n\n", separator)
|
||||||
|
}
|
||||||
|
info, err := inspector.GetQueueInfo(qname)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
printQueueInfo(info)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func printQueueInfo(info *asynq.QueueInfo) {
|
||||||
|
bold := color.New(color.Bold)
|
||||||
|
bold.Println("Queue Info")
|
||||||
|
fmt.Printf("Name: %s\n", info.Queue)
|
||||||
|
fmt.Printf("Size: %d\n", info.Size)
|
||||||
|
fmt.Printf("Paused: %t\n\n", info.Paused)
|
||||||
|
bold.Println("Task Count by State")
|
||||||
|
printTable(
|
||||||
|
[]string{"active", "pending", "scheduled", "retry", "archived"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
fmt.Println()
|
||||||
|
bold.Printf("Daily Stats %s UTC\n", info.Timestamp.UTC().Format("2006-01-02"))
|
||||||
|
printTable(
|
||||||
|
[]string{"processed", "failed", "error rate"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
var errRate string
|
||||||
|
if info.Processed == 0 {
|
||||||
|
errRate = "N/A"
|
||||||
|
} else {
|
||||||
|
errRate = fmt.Sprintf("%.2f%%", float64(info.Failed)/float64(info.Processed)*100)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, tmpl, info.Processed, info.Failed, errRate)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func queueHistory(cmd *cobra.Command, args []string) {
|
||||||
|
days, err := cmd.Flags().GetInt("days")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: Internal error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
inspector := createInspector()
|
||||||
|
for i, qname := range args {
|
||||||
|
if i > 0 {
|
||||||
|
fmt.Printf("\n%s\n\n", separator)
|
||||||
|
}
|
||||||
|
fmt.Printf("Queue: %s\n\n", qname)
|
||||||
|
stats, err := inspector.History(qname, days)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
printDailyStats(stats)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func printDailyStats(stats []*asynq.DailyStats) {
|
||||||
|
printTable(
|
||||||
|
[]string{"date (UTC)", "processed", "failed", "error rate"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
for _, s := range stats {
|
||||||
|
var errRate string
|
||||||
|
if s.Processed == 0 {
|
||||||
|
errRate = "N/A"
|
||||||
|
} else {
|
||||||
|
errRate = fmt.Sprintf("%.2f%%", float64(s.Failed)/float64(s.Processed)*100)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, tmpl, s.Date.Format("2006-01-02"), s.Processed, s.Failed, errRate)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func queuePause(cmd *cobra.Command, args []string) {
|
||||||
|
inspector := createInspector()
|
||||||
|
for _, qname := range args {
|
||||||
|
err := inspector.PauseQueue(qname)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("Successfully paused queue %q\n", qname)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func queueUnpause(cmd *cobra.Command, args []string) {
|
||||||
|
inspector := createInspector()
|
||||||
|
for _, qname := range args {
|
||||||
|
err := inspector.UnpauseQueue(qname)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("Successfully unpaused queue %q\n", qname)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func queueRemove(cmd *cobra.Command, args []string) {
|
||||||
|
// TODO: Use inspector once RemoveQueue become public API.
|
||||||
|
force, err := cmd.Flags().GetBool("force")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: Internal error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
r := createRDB()
|
||||||
|
for _, qname := range args {
|
||||||
|
err = r.RemoveQueue(qname, force)
|
||||||
|
if err != nil {
|
||||||
|
if errors.IsQueueNotEmpty(err) {
|
||||||
|
fmt.Printf("error: %v\nIf you are sure you want to delete it, run 'asynq queue rm --force %s'\n", err, qname)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("Successfully removed queue %q\n", qname)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,54 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
// rmqCmd represents the rmq command
|
|
||||||
var rmqCmd = &cobra.Command{
|
|
||||||
Use: "rmq [queue name]",
|
|
||||||
Short: "Removes the specified queue",
|
|
||||||
Long: `Rmq (asynq rmq) will remove the specified queue.
|
|
||||||
By default, it will remove the queue only if it's empty.
|
|
||||||
Use --force option to override this behavior.
|
|
||||||
|
|
||||||
Example: asynq rmq low -> Removes "low" queue`,
|
|
||||||
Args: cobra.ExactValidArgs(1),
|
|
||||||
Run: rmq,
|
|
||||||
}
|
|
||||||
|
|
||||||
var rmqForce bool
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(rmqCmd)
|
|
||||||
rmqCmd.Flags().BoolVarP(&rmqForce, "force", "f", false, "remove the queue regardless of its size")
|
|
||||||
}
|
|
||||||
|
|
||||||
func rmq(cmd *cobra.Command, args []string) {
|
|
||||||
c := redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
})
|
|
||||||
r := rdb.NewRDB(c)
|
|
||||||
err := r.RemoveQueue(args[0], rmqForce)
|
|
||||||
if err != nil {
|
|
||||||
if _, ok := err.(*rdb.ErrQueueNotEmpty); ok {
|
|
||||||
fmt.Printf("error: %v\nIf you are sure you want to delete it, run 'asynq rmq --force %s'\n", err, args[0])
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fmt.Printf("error: %v", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fmt.Printf("Successfully removed queue %q\n", args[0])
|
|
||||||
}
|
|
||||||
@@ -5,12 +5,19 @@
|
|||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/tls"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/go-redis/redis/v7"
|
||||||
|
"github.com/hibiken/asynq"
|
||||||
|
"github.com/hibiken/asynq/internal/base"
|
||||||
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
homedir "github.com/mitchellh/go-homedir"
|
homedir "github.com/mitchellh/go-homedir"
|
||||||
@@ -19,16 +26,33 @@ import (
|
|||||||
|
|
||||||
var cfgFile string
|
var cfgFile string
|
||||||
|
|
||||||
// Flags
|
// Global flag variables
|
||||||
var uri string
|
var (
|
||||||
var db int
|
uri string
|
||||||
var password string
|
db int
|
||||||
|
password string
|
||||||
|
|
||||||
|
useRedisCluster bool
|
||||||
|
clusterAddrs string
|
||||||
|
tlsServerName string
|
||||||
|
)
|
||||||
|
|
||||||
// rootCmd represents the base command when called without any subcommands
|
// rootCmd represents the base command when called without any subcommands
|
||||||
var rootCmd = &cobra.Command{
|
var rootCmd = &cobra.Command{
|
||||||
Use: "asynq",
|
Use: "asynq",
|
||||||
Short: "A monitoring tool for asynq queues",
|
Short: "A monitoring tool for asynq queues",
|
||||||
Long: `Asynq is a montoring CLI to inspect tasks and queues managed by asynq.`,
|
Long: `Asynq is a montoring CLI to inspect tasks and queues managed by asynq.`,
|
||||||
|
Version: base.Version,
|
||||||
|
}
|
||||||
|
|
||||||
|
var versionOutput = fmt.Sprintf("asynq version %s\n", base.Version)
|
||||||
|
|
||||||
|
var versionCmd = &cobra.Command{
|
||||||
|
Use: "version",
|
||||||
|
Hidden: true,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Print(versionOutput)
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||||
@@ -43,13 +67,26 @@ func Execute() {
|
|||||||
func init() {
|
func init() {
|
||||||
cobra.OnInitialize(initConfig)
|
cobra.OnInitialize(initConfig)
|
||||||
|
|
||||||
|
rootCmd.AddCommand(versionCmd)
|
||||||
|
rootCmd.SetVersionTemplate(versionOutput)
|
||||||
|
|
||||||
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file to set flag defaut values (default is $HOME/.asynq.yaml)")
|
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file to set flag defaut values (default is $HOME/.asynq.yaml)")
|
||||||
rootCmd.PersistentFlags().StringVarP(&uri, "uri", "u", "127.0.0.1:6379", "redis server URI")
|
rootCmd.PersistentFlags().StringVarP(&uri, "uri", "u", "127.0.0.1:6379", "redis server URI")
|
||||||
rootCmd.PersistentFlags().IntVarP(&db, "db", "n", 0, "redis database number (default is 0)")
|
rootCmd.PersistentFlags().IntVarP(&db, "db", "n", 0, "redis database number (default is 0)")
|
||||||
rootCmd.PersistentFlags().StringVarP(&password, "password", "p", "", "password to use when connecting to redis server")
|
rootCmd.PersistentFlags().StringVarP(&password, "password", "p", "", "password to use when connecting to redis server")
|
||||||
|
rootCmd.PersistentFlags().BoolVar(&useRedisCluster, "cluster", false, "connect to redis cluster")
|
||||||
|
rootCmd.PersistentFlags().StringVar(&clusterAddrs, "cluster_addrs",
|
||||||
|
"127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003,127.0.0.1:7004,127.0.0.1:7005",
|
||||||
|
"list of comma-separated redis server addresses")
|
||||||
|
rootCmd.PersistentFlags().StringVar(&tlsServerName, "tls_server",
|
||||||
|
"", "server name for TLS validation")
|
||||||
|
// Bind flags with config.
|
||||||
viper.BindPFlag("uri", rootCmd.PersistentFlags().Lookup("uri"))
|
viper.BindPFlag("uri", rootCmd.PersistentFlags().Lookup("uri"))
|
||||||
viper.BindPFlag("db", rootCmd.PersistentFlags().Lookup("db"))
|
viper.BindPFlag("db", rootCmd.PersistentFlags().Lookup("db"))
|
||||||
viper.BindPFlag("password", rootCmd.PersistentFlags().Lookup("password"))
|
viper.BindPFlag("password", rootCmd.PersistentFlags().Lookup("password"))
|
||||||
|
viper.BindPFlag("cluster", rootCmd.PersistentFlags().Lookup("cluster"))
|
||||||
|
viper.BindPFlag("cluster_addrs", rootCmd.PersistentFlags().Lookup("cluster_addrs"))
|
||||||
|
viper.BindPFlag("tls_server", rootCmd.PersistentFlags().Lookup("tls_server"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// initConfig reads in config file and ENV variables if set.
|
// initConfig reads in config file and ENV variables if set.
|
||||||
@@ -78,6 +115,57 @@ func initConfig() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// createRDB creates a RDB instance using flag values and returns it.
|
||||||
|
func createRDB() *rdb.RDB {
|
||||||
|
var c redis.UniversalClient
|
||||||
|
if useRedisCluster {
|
||||||
|
addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
|
||||||
|
c = redis.NewClusterClient(&redis.ClusterOptions{
|
||||||
|
Addrs: addrs,
|
||||||
|
Password: viper.GetString("password"),
|
||||||
|
TLSConfig: getTLSConfig(),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
c = redis.NewClient(&redis.Options{
|
||||||
|
Addr: viper.GetString("uri"),
|
||||||
|
DB: viper.GetInt("db"),
|
||||||
|
Password: viper.GetString("password"),
|
||||||
|
TLSConfig: getTLSConfig(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return rdb.NewRDB(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// createRDB creates a Inspector instance using flag values and returns it.
|
||||||
|
func createInspector() *asynq.Inspector {
|
||||||
|
return asynq.NewInspector(getRedisConnOpt())
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRedisConnOpt() asynq.RedisConnOpt {
|
||||||
|
if useRedisCluster {
|
||||||
|
addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
|
||||||
|
return asynq.RedisClusterClientOpt{
|
||||||
|
Addrs: addrs,
|
||||||
|
Password: viper.GetString("password"),
|
||||||
|
TLSConfig: getTLSConfig(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return asynq.RedisClientOpt{
|
||||||
|
Addr: viper.GetString("uri"),
|
||||||
|
DB: viper.GetInt("db"),
|
||||||
|
Password: viper.GetString("password"),
|
||||||
|
TLSConfig: getTLSConfig(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTLSConfig() *tls.Config {
|
||||||
|
tlsServer := viper.GetString("tls_server")
|
||||||
|
if tlsServer == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &tls.Config{ServerName: tlsServer}
|
||||||
|
}
|
||||||
|
|
||||||
// printTable is a helper function to print data in table format.
|
// printTable is a helper function to print data in table format.
|
||||||
//
|
//
|
||||||
// cols is a list of headers and printRow specifies how to print rows.
|
// cols is a list of headers and printRow specifies how to print rows.
|
||||||
@@ -110,3 +198,28 @@ func printTable(cols []string, printRows func(w io.Writer, tmpl string)) {
|
|||||||
printRows(tw, format)
|
printRows(tw, format)
|
||||||
tw.Flush()
|
tw.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// formatPayload returns string representation of payload if data is printable.
|
||||||
|
// If data is not printable, it returns a string describing payload is not printable.
|
||||||
|
func formatPayload(payload []byte) string {
|
||||||
|
if !isPrintable(payload) {
|
||||||
|
return "non-printable bytes"
|
||||||
|
}
|
||||||
|
return string(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isPrintable(data []byte) bool {
|
||||||
|
if !utf8.Valid(data) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
isAllSpace := true
|
||||||
|
for _, r := range string(data) {
|
||||||
|
if !unicode.IsPrint(r) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !unicode.IsSpace(r) {
|
||||||
|
isAllSpace = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return !isAllSpace
|
||||||
|
}
|
||||||
|
|||||||
@@ -12,42 +12,39 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// serversCmd represents the servers command
|
func init() {
|
||||||
var serversCmd = &cobra.Command{
|
rootCmd.AddCommand(serverCmd)
|
||||||
Use: "servers",
|
serverCmd.AddCommand(serverListCmd)
|
||||||
Short: "Shows all running worker servers",
|
}
|
||||||
Long: `Servers (asynq servers) will show all running worker servers
|
|
||||||
pulling tasks from the specified redis instance.
|
var serverCmd = &cobra.Command{
|
||||||
|
Use: "server",
|
||||||
|
Short: "Manage servers",
|
||||||
|
}
|
||||||
|
|
||||||
|
var serverListCmd = &cobra.Command{
|
||||||
|
Use: "ls",
|
||||||
|
Short: "List servers",
|
||||||
|
Long: `Server list (asynq server ls) shows all running worker servers
|
||||||
|
pulling tasks from the given redis instance.
|
||||||
|
|
||||||
The command shows the following for each server:
|
The command shows the following for each server:
|
||||||
* Host and PID of the process in which the server is running
|
* Host and PID of the process in which the server is running
|
||||||
* Number of active workers out of worker pool
|
* Number of active workers out of worker pool
|
||||||
* Queue configuration
|
* Queue configuration
|
||||||
* State of the worker server ("running" | "quiet")
|
* State of the worker server ("active" | "stopped")
|
||||||
* Time the server was started
|
* Time the server was started
|
||||||
|
|
||||||
A "running" server is pulling tasks from queues and processing them.
|
A "active" server is pulling tasks from queues and processing them.
|
||||||
A "quiet" server is no longer pulling new tasks from queues`,
|
A "stopped" server is no longer pulling new tasks from queues`,
|
||||||
Args: cobra.NoArgs,
|
Run: serverList,
|
||||||
Run: servers,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func serverList(cmd *cobra.Command, args []string) {
|
||||||
rootCmd.AddCommand(serversCmd)
|
r := createRDB()
|
||||||
}
|
|
||||||
|
|
||||||
func servers(cmd *cobra.Command, args []string) {
|
|
||||||
r := rdb.NewRDB(redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
}))
|
|
||||||
|
|
||||||
servers, err := r.ListServers()
|
servers, err := r.ListServers()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -81,12 +78,6 @@ func servers(cmd *cobra.Command, args []string) {
|
|||||||
printTable(cols, printRows)
|
printTable(cols, printRows)
|
||||||
}
|
}
|
||||||
|
|
||||||
// timeAgo takes a time and returns a string of the format "<duration> ago".
|
|
||||||
func timeAgo(since time.Time) string {
|
|
||||||
d := time.Since(since).Round(time.Second)
|
|
||||||
return fmt.Sprintf("%v ago", d)
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatQueues(qmap map[string]int) string {
|
func formatQueues(qmap map[string]int) string {
|
||||||
// sort queues by priority and name
|
// sort queues by priority and name
|
||||||
type queue struct {
|
type queue struct {
|
||||||
@@ -116,3 +107,9 @@ func formatQueues(qmap map[string]int) string {
|
|||||||
}
|
}
|
||||||
return b.String()
|
return b.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// timeAgo takes a time and returns a string of the format "<duration> ago".
|
||||||
|
func timeAgo(since time.Time) string {
|
||||||
|
d := time.Since(since).Round(time.Second)
|
||||||
|
return fmt.Sprintf("%v ago", d)
|
||||||
|
}
|
||||||
@@ -6,23 +6,23 @@ package cmd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
"github.com/fatih/color"
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
"github.com/hibiken/asynq/internal/rdb"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// statsCmd represents the stats command
|
// statsCmd represents the stats command
|
||||||
var statsCmd = &cobra.Command{
|
var statsCmd = &cobra.Command{
|
||||||
Use: "stats",
|
Use: "stats",
|
||||||
Short: "Shows current state of the tasks and queues",
|
Short: "Shows current state of the tasks and queues",
|
||||||
Long: `Stats (aysnqmon stats) will show the overview of tasks and queues at that instant.
|
Long: `Stats (aysnq stats) will show the overview of tasks and queues at that instant.
|
||||||
|
|
||||||
Specifically, the command shows the following:
|
Specifically, the command shows the following:
|
||||||
* Number of tasks in each state
|
* Number of tasks in each state
|
||||||
@@ -52,72 +52,115 @@ func init() {
|
|||||||
// statsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
// statsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type AggregateStats struct {
|
||||||
|
Active int
|
||||||
|
Pending int
|
||||||
|
Scheduled int
|
||||||
|
Retry int
|
||||||
|
Archived int
|
||||||
|
Processed int
|
||||||
|
Failed int
|
||||||
|
Timestamp time.Time
|
||||||
|
}
|
||||||
|
|
||||||
func stats(cmd *cobra.Command, args []string) {
|
func stats(cmd *cobra.Command, args []string) {
|
||||||
c := redis.NewClient(&redis.Options{
|
r := createRDB()
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
})
|
|
||||||
r := rdb.NewRDB(c)
|
|
||||||
|
|
||||||
stats, err := r.CurrentStats()
|
queues, err := r.AllQueues()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
info, err := r.RedisInfo()
|
|
||||||
|
var aggStats AggregateStats
|
||||||
|
var stats []*rdb.Stats
|
||||||
|
for _, qname := range queues {
|
||||||
|
s, err := r.CurrentStats(qname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
fmt.Println("STATES")
|
aggStats.Active += s.Active
|
||||||
printStates(stats)
|
aggStats.Pending += s.Pending
|
||||||
|
aggStats.Scheduled += s.Scheduled
|
||||||
|
aggStats.Retry += s.Retry
|
||||||
|
aggStats.Archived += s.Archived
|
||||||
|
aggStats.Processed += s.Processed
|
||||||
|
aggStats.Failed += s.Failed
|
||||||
|
aggStats.Timestamp = s.Timestamp
|
||||||
|
stats = append(stats, s)
|
||||||
|
}
|
||||||
|
var info map[string]string
|
||||||
|
if useRedisCluster {
|
||||||
|
info, err = r.RedisClusterInfo()
|
||||||
|
} else {
|
||||||
|
info, err = r.RedisInfo()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
bold := color.New(color.Bold)
|
||||||
|
bold.Println("Task Count by State")
|
||||||
|
printStatsByState(&aggStats)
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
fmt.Println("QUEUES")
|
bold.Println("Task Count by Queue")
|
||||||
printQueues(stats.Queues)
|
printStatsByQueue(stats)
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
fmt.Printf("STATS FOR %s UTC\n", stats.Timestamp.UTC().Format("2006-01-02"))
|
bold.Printf("Daily Stats %s UTC\n", aggStats.Timestamp.UTC().Format("2006-01-02"))
|
||||||
printStats(stats)
|
printSuccessFailureStats(&aggStats)
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
fmt.Println("REDIS INFO")
|
if useRedisCluster {
|
||||||
|
bold.Println("Redis Cluster Info")
|
||||||
|
printClusterInfo(info)
|
||||||
|
} else {
|
||||||
|
bold.Println("Redis Info")
|
||||||
printInfo(info)
|
printInfo(info)
|
||||||
|
}
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
}
|
}
|
||||||
|
|
||||||
func printStates(s *rdb.Stats) {
|
func printStatsByState(s *AggregateStats) {
|
||||||
format := strings.Repeat("%v\t", 5) + "\n"
|
format := strings.Repeat("%v\t", 5) + "\n"
|
||||||
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
|
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
|
||||||
fmt.Fprintf(tw, format, "InProgress", "Enqueued", "Scheduled", "Retry", "Dead")
|
fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived")
|
||||||
fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----")
|
fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----")
|
||||||
fmt.Fprintf(tw, format, s.InProgress, s.Enqueued, s.Scheduled, s.Retry, s.Dead)
|
fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived)
|
||||||
tw.Flush()
|
tw.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
func printQueues(queues map[string]int) {
|
func printStatsByQueue(stats []*rdb.Stats) {
|
||||||
var qnames, seps, counts []string
|
var headers, seps, counts []string
|
||||||
for q := range queues {
|
for _, s := range stats {
|
||||||
qnames = append(qnames, strings.Title(q))
|
title := queueTitle(s)
|
||||||
|
headers = append(headers, title)
|
||||||
|
seps = append(seps, strings.Repeat("-", len(title)))
|
||||||
|
counts = append(counts, strconv.Itoa(s.Size))
|
||||||
}
|
}
|
||||||
sort.Strings(qnames) // sort for stable order
|
format := strings.Repeat("%v\t", len(headers)) + "\n"
|
||||||
for _, q := range qnames {
|
|
||||||
seps = append(seps, strings.Repeat("-", len(q)))
|
|
||||||
counts = append(counts, strconv.Itoa(queues[strings.ToLower(q)]))
|
|
||||||
}
|
|
||||||
format := strings.Repeat("%v\t", len(qnames)) + "\n"
|
|
||||||
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
|
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
|
||||||
fmt.Fprintf(tw, format, toInterfaceSlice(qnames)...)
|
fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)
|
||||||
fmt.Fprintf(tw, format, toInterfaceSlice(seps)...)
|
fmt.Fprintf(tw, format, toInterfaceSlice(seps)...)
|
||||||
fmt.Fprintf(tw, format, toInterfaceSlice(counts)...)
|
fmt.Fprintf(tw, format, toInterfaceSlice(counts)...)
|
||||||
tw.Flush()
|
tw.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
func printStats(s *rdb.Stats) {
|
func queueTitle(s *rdb.Stats) string {
|
||||||
|
var b strings.Builder
|
||||||
|
b.WriteString(s.Queue)
|
||||||
|
if s.Paused {
|
||||||
|
b.WriteString(" (paused)")
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func printSuccessFailureStats(s *AggregateStats) {
|
||||||
format := strings.Repeat("%v\t", 3) + "\n"
|
format := strings.Repeat("%v\t", 3) + "\n"
|
||||||
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
|
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
|
||||||
fmt.Fprintf(tw, format, "Processed", "Failed", "Error Rate")
|
fmt.Fprintf(tw, format, "processed", "failed", "error rate")
|
||||||
fmt.Fprintf(tw, format, "---------", "------", "----------")
|
fmt.Fprintf(tw, format, "---------", "------", "----------")
|
||||||
var errrate string
|
var errrate string
|
||||||
if s.Processed == 0 {
|
if s.Processed == 0 {
|
||||||
@@ -132,7 +175,7 @@ func printStats(s *rdb.Stats) {
|
|||||||
func printInfo(info map[string]string) {
|
func printInfo(info map[string]string) {
|
||||||
format := strings.Repeat("%v\t", 5) + "\n"
|
format := strings.Repeat("%v\t", 5) + "\n"
|
||||||
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
|
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
|
||||||
fmt.Fprintf(tw, format, "Version", "Uptime", "Connections", "Memory Usage", "Peak Memory Usage")
|
fmt.Fprintf(tw, format, "version", "uptime", "connections", "memory usage", "peak memory usage")
|
||||||
fmt.Fprintf(tw, format, "-------", "------", "-----------", "------------", "-----------------")
|
fmt.Fprintf(tw, format, "-------", "------", "-----------", "------------", "-----------------")
|
||||||
fmt.Fprintf(tw, format,
|
fmt.Fprintf(tw, format,
|
||||||
info["redis_version"],
|
info["redis_version"],
|
||||||
@@ -144,6 +187,19 @@ func printInfo(info map[string]string) {
|
|||||||
tw.Flush()
|
tw.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func printClusterInfo(info map[string]string) {
|
||||||
|
printTable(
|
||||||
|
[]string{"State", "Known Nodes", "Cluster Size"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
fmt.Fprintf(w, tmpl,
|
||||||
|
strings.ToUpper(info["cluster_state"]),
|
||||||
|
info["cluster_known_nodes"],
|
||||||
|
info["cluster_size"],
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
func toInterfaceSlice(strs []string) []interface{} {
|
func toInterfaceSlice(strs []string) []interface{} {
|
||||||
var res []interface{}
|
var res []interface{}
|
||||||
for _, s := range strs {
|
for _, s := range strs {
|
||||||
|
|||||||
540
tools/asynq/cmd/task.go
Normal file
540
tools/asynq/cmd/task.go
Normal file
@@ -0,0 +1,540 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fatih/color"
|
||||||
|
"github.com/hibiken/asynq"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(taskCmd)
|
||||||
|
taskCmd.AddCommand(taskListCmd)
|
||||||
|
taskListCmd.Flags().StringP("queue", "q", "", "queue to inspect")
|
||||||
|
taskListCmd.Flags().StringP("state", "s", "", "state of the tasks to inspect")
|
||||||
|
taskListCmd.Flags().Int("page", 1, "page number")
|
||||||
|
taskListCmd.Flags().Int("size", 30, "page size")
|
||||||
|
taskListCmd.MarkFlagRequired("queue")
|
||||||
|
taskListCmd.MarkFlagRequired("state")
|
||||||
|
|
||||||
|
taskCmd.AddCommand(taskCancelCmd)
|
||||||
|
|
||||||
|
taskCmd.AddCommand(taskInspectCmd)
|
||||||
|
taskInspectCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
|
||||||
|
taskInspectCmd.Flags().StringP("id", "i", "", "id of the task")
|
||||||
|
taskInspectCmd.MarkFlagRequired("queue")
|
||||||
|
taskInspectCmd.MarkFlagRequired("id")
|
||||||
|
|
||||||
|
taskCmd.AddCommand(taskArchiveCmd)
|
||||||
|
taskArchiveCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
|
||||||
|
taskArchiveCmd.Flags().StringP("id", "i", "", "id of the task")
|
||||||
|
taskArchiveCmd.MarkFlagRequired("queue")
|
||||||
|
taskArchiveCmd.MarkFlagRequired("id")
|
||||||
|
|
||||||
|
taskCmd.AddCommand(taskDeleteCmd)
|
||||||
|
taskDeleteCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
|
||||||
|
taskDeleteCmd.Flags().StringP("id", "i", "", "id of the task")
|
||||||
|
taskDeleteCmd.MarkFlagRequired("queue")
|
||||||
|
taskDeleteCmd.MarkFlagRequired("id")
|
||||||
|
|
||||||
|
taskCmd.AddCommand(taskRunCmd)
|
||||||
|
taskRunCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
|
||||||
|
taskRunCmd.Flags().StringP("id", "i", "", "id of the task")
|
||||||
|
taskRunCmd.MarkFlagRequired("queue")
|
||||||
|
taskRunCmd.MarkFlagRequired("id")
|
||||||
|
|
||||||
|
taskCmd.AddCommand(taskArchiveAllCmd)
|
||||||
|
taskArchiveAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
|
||||||
|
taskArchiveAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
|
||||||
|
taskArchiveAllCmd.MarkFlagRequired("queue")
|
||||||
|
taskArchiveAllCmd.MarkFlagRequired("state")
|
||||||
|
|
||||||
|
taskCmd.AddCommand(taskDeleteAllCmd)
|
||||||
|
taskDeleteAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
|
||||||
|
taskDeleteAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
|
||||||
|
taskDeleteAllCmd.MarkFlagRequired("queue")
|
||||||
|
taskDeleteAllCmd.MarkFlagRequired("state")
|
||||||
|
|
||||||
|
taskCmd.AddCommand(taskRunAllCmd)
|
||||||
|
taskRunAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
|
||||||
|
taskRunAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
|
||||||
|
taskRunAllCmd.MarkFlagRequired("queue")
|
||||||
|
taskRunAllCmd.MarkFlagRequired("state")
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskCmd = &cobra.Command{
|
||||||
|
Use: "task",
|
||||||
|
Short: "Manage tasks",
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskListCmd = &cobra.Command{
|
||||||
|
Use: "ls --queue=QUEUE --state=STATE",
|
||||||
|
Short: "List tasks",
|
||||||
|
Long: `List tasks of the given state from the specified queue.
|
||||||
|
|
||||||
|
The value for the state flag should be one of:
|
||||||
|
- active
|
||||||
|
- pending
|
||||||
|
- scheduled
|
||||||
|
- retry
|
||||||
|
- archived
|
||||||
|
|
||||||
|
List opeartion paginates the result set.
|
||||||
|
By default, the command fetches the first 30 tasks.
|
||||||
|
Use --page and --size flags to specify the page number and size.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
To list pending tasks from "default" queue, run
|
||||||
|
asynq task ls --queue=default --state=pending
|
||||||
|
|
||||||
|
To list the tasks from the second page, run
|
||||||
|
asynq task ls --queue=default --state=pending --page=1`,
|
||||||
|
Run: taskList,
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskInspectCmd = &cobra.Command{
|
||||||
|
Use: "inspect --queue=QUEUE --id=TASK_ID",
|
||||||
|
Short: "Display detailed information on the specified task",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
Run: taskInspect,
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskCancelCmd = &cobra.Command{
|
||||||
|
Use: "cancel TASK_ID [TASK_ID...]",
|
||||||
|
Short: "Cancel one or more active tasks",
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
|
Run: taskCancel,
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskArchiveCmd = &cobra.Command{
|
||||||
|
Use: "archive --queue=QUEUE --id=TASK_ID",
|
||||||
|
Short: "Archive a task with the given id",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
Run: taskArchive,
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskDeleteCmd = &cobra.Command{
|
||||||
|
Use: "delete --queue=QUEUE --id=TASK_ID",
|
||||||
|
Short: "Delete a task with the given id",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
Run: taskDelete,
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskRunCmd = &cobra.Command{
|
||||||
|
Use: "run --queue=QUEUE --id=TASK_ID",
|
||||||
|
Short: "Run a task with the given id",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
Run: taskRun,
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskArchiveAllCmd = &cobra.Command{
|
||||||
|
Use: "archiveall --queue=QUEUE --state=STATE",
|
||||||
|
Short: "Archive all tasks in the given state",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
Run: taskArchiveAll,
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskDeleteAllCmd = &cobra.Command{
|
||||||
|
Use: "deleteall --queue=QUEUE --state=STATE",
|
||||||
|
Short: "Delete all tasks in the given state",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
Run: taskDeleteAll,
|
||||||
|
}
|
||||||
|
|
||||||
|
var taskRunAllCmd = &cobra.Command{
|
||||||
|
Use: "runall --queue=QUEUE --state=STATE",
|
||||||
|
Short: "Run all tasks in the given state",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
Run: taskRunAll,
|
||||||
|
}
|
||||||
|
|
||||||
|
func taskList(cmd *cobra.Command, args []string) {
|
||||||
|
qname, err := cmd.Flags().GetString("queue")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
state, err := cmd.Flags().GetString("state")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
pageNum, err := cmd.Flags().GetInt("page")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
pageSize, err := cmd.Flags().GetInt("size")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch state {
|
||||||
|
case "active":
|
||||||
|
listActiveTasks(qname, pageNum, pageSize)
|
||||||
|
case "pending":
|
||||||
|
listPendingTasks(qname, pageNum, pageSize)
|
||||||
|
case "scheduled":
|
||||||
|
listScheduledTasks(qname, pageNum, pageSize)
|
||||||
|
case "retry":
|
||||||
|
listRetryTasks(qname, pageNum, pageSize)
|
||||||
|
case "archived":
|
||||||
|
listArchivedTasks(qname, pageNum, pageSize)
|
||||||
|
default:
|
||||||
|
fmt.Printf("error: state=%q is not supported\n", state)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func listActiveTasks(qname string, pageNum, pageSize int) {
|
||||||
|
i := createInspector()
|
||||||
|
tasks, err := i.ListActiveTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if len(tasks) == 0 {
|
||||||
|
fmt.Printf("No active tasks in %q queue\n", qname)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
printTable(
|
||||||
|
[]string{"ID", "Type", "Payload"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
for _, t := range tasks {
|
||||||
|
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func listPendingTasks(qname string, pageNum, pageSize int) {
|
||||||
|
i := createInspector()
|
||||||
|
tasks, err := i.ListPendingTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if len(tasks) == 0 {
|
||||||
|
fmt.Printf("No pending tasks in %q queue\n", qname)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
printTable(
|
||||||
|
[]string{"ID", "Type", "Payload"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
for _, t := range tasks {
|
||||||
|
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func listScheduledTasks(qname string, pageNum, pageSize int) {
|
||||||
|
i := createInspector()
|
||||||
|
tasks, err := i.ListScheduledTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if len(tasks) == 0 {
|
||||||
|
fmt.Printf("No scheduled tasks in %q queue\n", qname)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
printTable(
|
||||||
|
[]string{"ID", "Type", "Payload", "Process In"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
for _, t := range tasks {
|
||||||
|
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatProcessAt formats next process at time to human friendly string.
|
||||||
|
// If processAt time is in the past, returns "right now".
|
||||||
|
// If processAt time is in the future, returns "in xxx" where xxx is the duration from now.
|
||||||
|
func formatProcessAt(processAt time.Time) string {
|
||||||
|
d := processAt.Sub(time.Now())
|
||||||
|
if d < 0 {
|
||||||
|
return "right now"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("in %v", d.Round(time.Second))
|
||||||
|
}
|
||||||
|
|
||||||
|
func listRetryTasks(qname string, pageNum, pageSize int) {
|
||||||
|
i := createInspector()
|
||||||
|
tasks, err := i.ListRetryTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if len(tasks) == 0 {
|
||||||
|
fmt.Printf("No retry tasks in %q queue\n", qname)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
printTable(
|
||||||
|
[]string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Last Failed", "Retried", "Max Retry"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
for _, t := range tasks {
|
||||||
|
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt),
|
||||||
|
t.LastErr, formatLastFailedAt(t.LastFailedAt), t.Retried, t.MaxRetry)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func listArchivedTasks(qname string, pageNum, pageSize int) {
|
||||||
|
i := createInspector()
|
||||||
|
tasks, err := i.ListArchivedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if len(tasks) == 0 {
|
||||||
|
fmt.Printf("No archived tasks in %q queue\n", qname)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
printTable(
|
||||||
|
[]string{"ID", "Type", "Payload", "Last Failed", "Last Error"},
|
||||||
|
func(w io.Writer, tmpl string) {
|
||||||
|
for _, t := range tasks {
|
||||||
|
fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatLastFailedAt(t.LastFailedAt), t.LastErr)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func taskCancel(cmd *cobra.Command, args []string) {
|
||||||
|
i := createInspector()
|
||||||
|
for _, id := range args {
|
||||||
|
if err := i.CancelProcessing(id); err != nil {
|
||||||
|
fmt.Printf("error: could not send cancelation signal: %v\n", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("Sent cancelation signal for task %s\n", id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func taskInspect(cmd *cobra.Command, args []string) {
|
||||||
|
qname, err := cmd.Flags().GetString("queue")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
id, err := cmd.Flags().GetString("id")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
i := createInspector()
|
||||||
|
info, err := i.GetTaskInfo(qname, id)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
printTaskInfo(info)
|
||||||
|
}
|
||||||
|
|
||||||
|
func printTaskInfo(info *asynq.TaskInfo) {
|
||||||
|
bold := color.New(color.Bold)
|
||||||
|
bold.Println("Task Info")
|
||||||
|
fmt.Printf("Queue: %s\n", info.Queue)
|
||||||
|
fmt.Printf("ID: %s\n", info.ID)
|
||||||
|
fmt.Printf("Type: %s\n", info.Type)
|
||||||
|
fmt.Printf("State: %v\n", info.State)
|
||||||
|
fmt.Printf("Retried: %d/%d\n", info.Retried, info.MaxRetry)
|
||||||
|
fmt.Println()
|
||||||
|
fmt.Printf("Next process time: %s\n", formatNextProcessAt(info.NextProcessAt))
|
||||||
|
if len(info.LastErr) != 0 {
|
||||||
|
fmt.Println()
|
||||||
|
bold.Println("Last Failure")
|
||||||
|
fmt.Printf("Failed at: %s\n", formatLastFailedAt(info.LastFailedAt))
|
||||||
|
fmt.Printf("Error message: %s\n", info.LastErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatNextProcessAt(processAt time.Time) string {
|
||||||
|
if processAt.IsZero() || processAt.Unix() == 0 {
|
||||||
|
return "n/a"
|
||||||
|
}
|
||||||
|
if processAt.Before(time.Now()) {
|
||||||
|
return "now"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s (in %v)", processAt.Format(time.UnixDate), processAt.Sub(time.Now()).Round(time.Second))
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatLastFailedAt(lastFailedAt time.Time) string {
|
||||||
|
if lastFailedAt.IsZero() || lastFailedAt.Unix() == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return lastFailedAt.Format(time.UnixDate)
|
||||||
|
}
|
||||||
|
|
||||||
|
func taskArchive(cmd *cobra.Command, args []string) {
|
||||||
|
qname, err := cmd.Flags().GetString("queue")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
id, err := cmd.Flags().GetString("id")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
i := createInspector()
|
||||||
|
err = i.ArchiveTask(qname, id)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
fmt.Println("task archived")
|
||||||
|
}
|
||||||
|
|
||||||
|
func taskDelete(cmd *cobra.Command, args []string) {
|
||||||
|
qname, err := cmd.Flags().GetString("queue")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
id, err := cmd.Flags().GetString("id")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
i := createInspector()
|
||||||
|
err = i.DeleteTask(qname, id)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
fmt.Println("task deleted")
|
||||||
|
}
|
||||||
|
|
||||||
|
func taskRun(cmd *cobra.Command, args []string) {
|
||||||
|
qname, err := cmd.Flags().GetString("queue")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
id, err := cmd.Flags().GetString("id")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
i := createInspector()
|
||||||
|
err = i.RunTask(qname, id)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
fmt.Println("task is now pending")
|
||||||
|
}
|
||||||
|
|
||||||
|
func taskArchiveAll(cmd *cobra.Command, args []string) {
|
||||||
|
qname, err := cmd.Flags().GetString("queue")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
state, err := cmd.Flags().GetString("state")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
i := createInspector()
|
||||||
|
var n int
|
||||||
|
switch state {
|
||||||
|
case "pending":
|
||||||
|
n, err = i.ArchiveAllPendingTasks(qname)
|
||||||
|
case "scheduled":
|
||||||
|
n, err = i.ArchiveAllScheduledTasks(qname)
|
||||||
|
case "retry":
|
||||||
|
n, err = i.ArchiveAllRetryTasks(qname)
|
||||||
|
default:
|
||||||
|
fmt.Printf("error: unsupported state %q\n", state)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
fmt.Printf("%d tasks archived\n", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func taskDeleteAll(cmd *cobra.Command, args []string) {
|
||||||
|
qname, err := cmd.Flags().GetString("queue")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
state, err := cmd.Flags().GetString("state")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
i := createInspector()
|
||||||
|
var n int
|
||||||
|
switch state {
|
||||||
|
case "pending":
|
||||||
|
n, err = i.DeleteAllPendingTasks(qname)
|
||||||
|
case "scheduled":
|
||||||
|
n, err = i.DeleteAllScheduledTasks(qname)
|
||||||
|
case "retry":
|
||||||
|
n, err = i.DeleteAllRetryTasks(qname)
|
||||||
|
case "archived":
|
||||||
|
n, err = i.DeleteAllArchivedTasks(qname)
|
||||||
|
default:
|
||||||
|
fmt.Printf("error: unsupported state %q\n", state)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
fmt.Printf("%d tasks deleted\n", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func taskRunAll(cmd *cobra.Command, args []string) {
|
||||||
|
qname, err := cmd.Flags().GetString("queue")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
state, err := cmd.Flags().GetString("state")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
i := createInspector()
|
||||||
|
var n int
|
||||||
|
switch state {
|
||||||
|
case "scheduled":
|
||||||
|
n, err = i.RunAllScheduledTasks(qname)
|
||||||
|
case "retry":
|
||||||
|
n, err = i.RunAllRetryTasks(qname)
|
||||||
|
case "archived":
|
||||||
|
n, err = i.RunAllArchivedTasks(qname)
|
||||||
|
default:
|
||||||
|
fmt.Printf("error: unsupported state %q\n", state)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
fmt.Printf("%d tasks are now pending\n", n)
|
||||||
|
}
|
||||||
@@ -1,75 +0,0 @@
|
|||||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
|
||||||
"github.com/hibiken/asynq/internal/rdb"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
)
|
|
||||||
|
|
||||||
// workersCmd represents the workers command
|
|
||||||
var workersCmd = &cobra.Command{
|
|
||||||
Use: "workers",
|
|
||||||
Short: "Shows all running workers information",
|
|
||||||
Long: `Workers (asynq workers) will show all running workers information.
|
|
||||||
|
|
||||||
The command shows the following for each worker:
|
|
||||||
* Process in which the worker is running
|
|
||||||
* ID of the task worker is processing
|
|
||||||
* Type of the task worker is processing
|
|
||||||
* Payload of the task worker is processing
|
|
||||||
* Queue that the task was pulled from.
|
|
||||||
* Time the worker started processing the task`,
|
|
||||||
Args: cobra.NoArgs,
|
|
||||||
Run: workers,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
rootCmd.AddCommand(workersCmd)
|
|
||||||
}
|
|
||||||
|
|
||||||
func workers(cmd *cobra.Command, args []string) {
|
|
||||||
r := rdb.NewRDB(redis.NewClient(&redis.Options{
|
|
||||||
Addr: viper.GetString("uri"),
|
|
||||||
DB: viper.GetInt("db"),
|
|
||||||
Password: viper.GetString("password"),
|
|
||||||
}))
|
|
||||||
|
|
||||||
workers, err := r.ListWorkers()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(workers) == 0 {
|
|
||||||
fmt.Println("No workers")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// sort by started timestamp or ID.
|
|
||||||
sort.Slice(workers, func(i, j int) bool {
|
|
||||||
x, y := workers[i], workers[j]
|
|
||||||
if x.Started != y.Started {
|
|
||||||
return x.Started.Before(y.Started)
|
|
||||||
}
|
|
||||||
return x.ID.String() < y.ID.String()
|
|
||||||
})
|
|
||||||
|
|
||||||
cols := []string{"Process", "ID", "Type", "Payload", "Queue", "Started"}
|
|
||||||
printRows := func(w io.Writer, tmpl string) {
|
|
||||||
for _, wk := range workers {
|
|
||||||
fmt.Fprintf(w, tmpl,
|
|
||||||
fmt.Sprintf("%s:%d", wk.Host, wk.PID), wk.ID, wk.Type, wk.Payload, wk.Queue, timeAgo(wk.Started))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
printTable(cols, printRows)
|
|
||||||
}
|
|
||||||
19
tools/go.mod
19
tools/go.mod
@@ -3,12 +3,21 @@ module github.com/hibiken/asynq/tools
|
|||||||
go 1.13
|
go 1.13
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/go-redis/redis/v7 v7.2.0
|
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect
|
||||||
github.com/hibiken/asynq v0.4.0
|
github.com/coreos/go-etcd v2.0.0+incompatible // indirect
|
||||||
|
github.com/cpuguy83/go-md2man v1.0.10 // indirect
|
||||||
|
github.com/fatih/color v1.9.0
|
||||||
|
github.com/go-redis/redis/v7 v7.4.0
|
||||||
|
github.com/golang/protobuf v1.4.1 // indirect
|
||||||
|
github.com/google/uuid v1.2.0
|
||||||
|
github.com/hibiken/asynq v0.17.1
|
||||||
github.com/mitchellh/go-homedir v1.1.0
|
github.com/mitchellh/go-homedir v1.1.0
|
||||||
github.com/rs/xid v1.2.1
|
github.com/spf13/cast v1.3.1
|
||||||
github.com/spf13/cobra v0.0.5
|
github.com/spf13/cobra v1.1.1
|
||||||
github.com/spf13/viper v1.6.2
|
github.com/spf13/viper v1.7.0
|
||||||
|
github.com/ugorji/go v1.1.4 // indirect
|
||||||
|
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect
|
||||||
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
replace github.com/hibiken/asynq => ./..
|
replace github.com/hibiken/asynq => ./..
|
||||||
|
|||||||
250
tools/go.sum
250
tools/go.sum
@@ -1,130 +1,241 @@
|
|||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||||
|
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||||
|
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||||
|
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||||
|
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||||
|
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||||
|
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||||
|
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||||
|
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||||
|
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||||
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
|
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||||
|
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||||
|
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
|
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||||
|
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||||
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
|
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
|
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
|
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||||
|
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
||||||
|
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
|
|
||||||
github.com/go-redis/redis/v7 v7.0.0-beta.4/go.mod h1:xhhSbUMTsleRPur+Vgx9sUHtyN33bdjxY+9/0n9Ig8s=
|
|
||||||
github.com/go-redis/redis/v7 v7.1.0 h1:I4C4a8UGbFejiVjtYVTRVOiMIJ5pm5Yru6ibvDX/OS0=
|
|
||||||
github.com/go-redis/redis/v7 v7.1.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
|
||||||
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
|
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
|
||||||
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
||||||
|
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
|
||||||
|
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
|
||||||
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
|
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||||
|
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
||||||
|
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||||
|
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
|
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||||
|
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||||
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
|
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||||
|
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||||
|
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||||
|
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||||
|
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||||
|
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||||
|
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||||
|
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||||
|
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||||
|
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||||
github.com/hibiken/asynq v0.4.0 h1:NvAfYX0DRe04WgGMKRg5oX7bs6ktv2fu9YwB6O356FI=
|
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||||
github.com/hibiken/asynq v0.4.0/go.mod h1:dtrVkxCsGPVhVNHMDXAH7lFq64kbj43+G6lt4FQZfW4=
|
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||||
|
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||||
|
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||||
|
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||||
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
|
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
|
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||||
|
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||||
|
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||||
|
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
|
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
|
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
|
||||||
|
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
|
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||||
|
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||||
|
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||||
|
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||||
|
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||||
|
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||||
|
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
||||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||||
|
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||||
github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4=
|
|
||||||
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
|
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||||
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||||
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
|
||||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||||
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||||
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||||
|
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
|
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
|
||||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
|
||||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||||
|
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
|
||||||
|
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||||
|
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
|
||||||
|
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
|
||||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
|
||||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
|
||||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||||
github.com/spf13/viper v1.6.0/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
|
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||||
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
|
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
|
||||||
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
|
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
|
||||||
|
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
|
||||||
|
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
@@ -133,58 +244,156 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
|
|||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
|
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
|
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||||
|
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||||
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||||
|
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||||
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
|
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
|
||||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
|
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
|
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||||
|
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||||
|
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -192,4 +401,11 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=