Mirror of https://github.com/hibiken/asynq.git (synced 2025-10-22 09:56:12 +08:00)

Compare commits: 350 commits
Commit SHAs in this comparison, oldest last (the author, date, and message columns were not captured by the mirror):

```text
a19909f5f4 cea5110d15 9b63e23274 de25201d9f ec560afb01 d4006894ad 59927509d8 8211167de2 d7169cd445 dfae8638e1
b9943de2ab 871474f220 87dc392c7f dabcb120d5 bc2f1986d7 b8cb579407 bca624792c d865d89900 852af7abd1 5490d2c625
ebd7a32c0f 55d0610a03 ab8a4f5b1e d7ceb0c090 8bd70c6f84 10ab4e3745 349f4c50fb dff2e3a336 65040af7b5 053fe2d1ee
25832e5e95 aa26f3819e d94614bb9b ce46b07652 2d0170541c c1f08106da 74cf804197 8dfabfccb3 5f20edcbd1 1ddb2f7bce
82d18e3d91 43cb4ddf19 ddfc6747a1 970cb7a606 157e97e72e 22e6c9d297 99a6750656 e7c1c3ad6f c9183374c5 6e7106c8f2
9f2c321e98 e2b61c9056 531d1ef089 413afc2ab6 6bb4818509 f4ddac4dcc 4638405cbd 9e2f88c00d dbdd9c6d5f 2261c7c9a0
83cae4bb24 23c522dc9f 0d2c0f612b d612a8a9e4 b3ef9e91a9 05534c6f24 f0db219f6a 3ae0e7f528 421dc584ff cfd1a1dfe8
c197902dc0 e6355bf3f5 95c90a5cb8 6817af366a 4bce28d677 73f930313c bff2a05d59 684a7e0c98 46b23d6495 c0ae62499f
7744ade362 f532c95394 ff6768f9bb d5e9f3b1bd d02b722d8a 99c7ebeef2 bf54621196 27baf6de0d 1bd0bee1e5 a9feec5967
e01c6379c8 a0df047f71 68dd6d9a9d 6cce31a134 f9d7af3def b0321fb465 7776c7ae53 709ca79a2b 08d8f0b37c 385323b679
77604af265 4765742e8a 68839dc9d3 8922d2423a b358de907e 8ee1825e67 c8bda26bed 8aeeb61c9d 96c51fdc23 ea9086fd8b
e63d51da0c cd351d49b9 87264b66f3 62168b8d0d 840f7245b1 12f4c7cf6e 0ec3b55e6b 4bcc5ab6aa 456edb6b71 b835090ad8
09cbea66f6 b9c2572203 0bf767cf21 1812d05d21 4af65d5fa5 a19ad19382 8117ce8972 d98ecdebb4 ffe9aa74b3 d2d4029aba
76bd865ebc 136d1c9ea9 52e04355d3 cde3e57c6c dd66acef1b 30a3d9641a 961582cba6 430dbb298e 675826be5f 62f4e46b73
a500f8a534 bcfeff38ed 12a90f6a8d 807624e7dd 4d65024bd7 76486b5cb4 1db516c53c cb5bdf245c 267493ccef 5d7f1b6a80
77ded502ab f2284be43d 3cadab55cb 298a420f9f b1d717c842 56e5762eea 5ec41e388b 9c95c41651 476812475e 7af3981929
2516c4baba ebe482a65c 3e9fc2f972 63ce9ed0f9 32d3f329b9 544c301a8b 8b997d2fab 901105a8d7 aaa3f1d4fd 4722ca2d3d
6a9d9fd717 de28c1ea19 f618f5b1f5 aa936466b3 5d1ec70544 d1d3be9b00 bc77f6fe14 efe197a47b 97b5516183 8eafa03ca7
430b01c9aa 14c381dc40 e13122723a eba7c4e085 bfde0b6283 afde6a7266 6529a1e0b1 c9a6ab8ae1 557c1a5044 0236eb9a1c
3c2b2cf4a3 04df71198d 2884044e75 3719fad396 42c7ac0746 d331ff055d ccb682853e 7c3ad9e45c ea23db4f6b 00a25ca570
7235041128 a150d18ed7 0712e90f23 c5100a9c23 196d66f221 38509e309f f4dd8fe962 c06e9de97d 52d536a8f5 f9c0673116
b604d25937 dfdf530a24 e9239260ae 8f9d5a3352 c4dc993241 37dfd746d4 8d6e4167ab 476862dd7b dcd873fa2a 2604bb2192
942345ee80 1f059eeee1 4ae73abdaa 96b2318300 8312515e64 50e7f38365 fadcae76d6 a2d4ead989 82b6828f43 3114987428
1ee3b10104 6d720d6a05 3e6981170d a9aa480551 9d41de795a c43fb21a0a a293efcdab 69d7ec725a 450a9aa1e2 6e294a7013
c26b7469bd 818c2d6f35 e09870a08a ac3d5b126a 29e542e591 a891ce5568 ebe3c4083f c8c47fcbf0 cca680a7fd 8076b5ae50
a42c174dae a88325cb96 eb739a0258 a9c31553b8 dab8295883 131ac823fd 4897dba397 6b96459881 572eb338d5 27f4027447
ee1afd12f5 3ac548e97c f38f94b947 d6f389e63f 118ef27bf2 fad0696828 4037b41479 96f23d88cd 83bdca5220 2f226dfb84
3f26122ac0 2a18181501 aa2676bb57 9348a62691 f59de9ac56 996a6c0ead 47e9ba4eba dbf140a767 5f82b4b365 44a3d177f0
24b13bd865 d25090c669 b5caefd663 becd26479b 4b81b91d3e 8e23b865e9 a873d488ee e0a8f1252a 650d7fdbe9 f6d504939e
74f08795f8 35b2b1782e f63dcce0c0 565f86ee4f 94aa878060 50b6034bf9 154113d0d0 669c7995c4 6d6a301379 53f9475582
e8fdbc5a72 5f06c308f0 a913e6d73f 6978e93080 92d77bbc6e a28f61f313 9bd3d8e19e 7382e2aeb8 007fac8055 8d43fe407a
34b90ecc8a 8b60e6a268 486dcd799b 195f4603bb 2e2c9b9f6b 199bf4d66a 7e942ec241 379da8f7a2 feee87adda 7657f560ec
7c7de0d8e0 83f1e20d74 4e8ac151ae 08b71672aa 92af00f9fd 113451ce6a 9cd9f3d6b4 7b9119c703 9b05dea394 6cc5bafaba
716d3d987e 0527b93432 5dddc35d7c 4e5f596910 8bf5917cd9 7f30fa2bb6 ade6e61f51 a2abeedaa0 81bb52b08c bc2a7635a0
f65d408bf9 4749b4bbfc 06c4a1c7f8 8af4cbad51 4e800a7f68 d6a5c84dc6 363cfedb49 4595bd41c3 e236d55477 a38f628f3b
```
`.github/FUNDING.yml` (vendored, new file, 12 lines)

```yaml
# These are supported funding model platforms

github: [hibiken] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
```
`.github/workflows/benchstat.yml` (vendored, new file, 82 lines; indentation restored to standard GitHub Actions layout)

```yaml
# This workflow runs benchmarks against the current branch,
# compares them to benchmarks against master,
# and uploads the results as an artifact.

name: benchstat

on: [pull_request]

jobs:
  incoming:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Benchmark
        run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... | tee -a new.txt
      - name: Upload Benchmark
        uses: actions/upload-artifact@v2
        with:
          name: bench-incoming
          path: new.txt

  current:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          ref: master
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Benchmark
        run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... | tee -a old.txt
      - name: Upload Benchmark
        uses: actions/upload-artifact@v2
        with:
          name: bench-current
          path: old.txt

  benchstat:
    needs: [incoming, current]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Install benchstat
        run: go get -u golang.org/x/perf/cmd/benchstat
      - name: Download Incoming
        uses: actions/download-artifact@v2
        with:
          name: bench-incoming
      - name: Download Current
        uses: actions/download-artifact@v2
        with:
          name: bench-current
      - name: Benchstat Results
        run: benchstat old.txt new.txt | tee -a benchstat.txt
      - name: Upload benchstat results
        uses: actions/upload-artifact@v2
        with:
          name: benchstat
          path: benchstat.txt
```
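The workflow above runs `go test -run=^$ -bench=. -count=5` on both branches, so it picks up any standard Go benchmark in the module. A minimal sketch of the kind of benchmark it compares, using the byte-slice `NewTask` API shown later on this page (the task type and payload are illustrative):

```go
package asynq_test

import (
	"testing"

	"github.com/hibiken/asynq"
)

// Any function named Benchmark* with this signature is picked up by
// the -bench=. flag in the workflow above.
func BenchmarkNewTask(b *testing.B) {
	payload := []byte(`{"user_id": 42}`) // illustrative payload
	for i := 0; i < b.N; i++ {
		asynq.NewTask("email:deliver", payload) // illustrative task type
	}
}
```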
`.github/workflows/build.yml` (vendored, new file, 44 lines; indentation restored to standard GitHub Actions layout)

```yaml
name: build

on: [push, pull_request]

jobs:
  build:
    strategy:
      matrix:
        os: [ubuntu-latest]
        go-version: [1.14.x, 1.15.x, 1.16.x, 1.17.x]
    runs-on: ${{ matrix.os }}
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - uses: actions/checkout@v2

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}

      - name: Build core module
        run: go build -v ./...

      - name: Build x module
        run: cd x && go build -v ./... && cd ..

      - name: Build tools module
        run: cd tools && go build -v ./... && cd ..

      - name: Test core module
        run: go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...

      - name: Test x module
        run: cd x && go test -race -v ./... && cd ..

      - name: Benchmark Test
        run: go test -run=^$ -bench=. -loglevel=debug ./...

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
```
`.gitignore` (vendored, modified)

```diff
@@ -1,3 +1,4 @@
+vendor
 # Binaries for programs and plugins
 *.exe
 *.exe~
@@ -14,8 +15,13 @@
 # Ignore examples for now
 /examples
 
-# Ignore command binary
+# Ignore tool binaries
 /tools/asynq/asynq
+/tools/metrics_exporter/metrics_exporter
 
 # Ignore asynq config file
 .asynq.*
+
+# Ignore editor config files
+.vscode
+.idea
```
`.travis.yml` (deleted, 12 lines)

```diff
@@ -1,12 +0,0 @@
-language: go
-go_import_path: github.com/hibiken/asynq
-git:
-  depth: 1
-go: [1.13.x, 1.14.x]
-script:
-  - go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
-services:
-  - redis-server
-after_success:
-  - bash ./.travis/benchcmp.sh
-  - bash <(curl -s https://codecov.io/bash)
```
`.travis/benchcmp.sh` (deleted, 15 lines; filename inferred from the `after_success` step in the removed Travis config)

```diff
@@ -1,15 +0,0 @@
-if [ "${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" != "master" ]; then
-  REMOTE_URL="$(git config --get remote.origin.url)";
-  cd ${TRAVIS_BUILD_DIR}/.. && \
-  git clone ${REMOTE_URL} "${TRAVIS_REPO_SLUG}-bench" && \
-  cd "${TRAVIS_REPO_SLUG}-bench" && \
-  # Benchmark master
-  git checkout master && \
-  go test -run=XXX -bench=. ./... > master.txt && \
-  # Benchmark feature branch
-  git checkout ${TRAVIS_COMMIT} && \
-  go test -run=XXX -bench=. ./... > feature.txt && \
-  go get -u golang.org/x/tools/cmd/benchcmp && \
-  # compare two benchmarks
-  benchcmp master.txt feature.txt;
-fi
```
`CHANGELOG.md` (modified)

Hunk `@@ -7,6 +7,332 @@` inserts the release entries below after the existing `## [Unreleased]` heading.

## [Unreleased]
## [0.22.0] - 2022-02-19

### Added

- `BaseContext` is introduced in `Config` to specify a callback hook that provides a base `context` from which the `Handler` `context` is derived.
- `IsOrphaned` field is added to `TaskInfo` to describe a task left in the active state with no worker processing it.

### Changed

- `Server` now recovers tasks with an expired lease. Recovered tasks are retried/archived with the `ErrLeaseExpired` error.
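A minimal sketch of the new `BaseContext` hook (the context key and server address are illustrative):

```go
package main

import (
	"context"

	"github.com/hibiken/asynq"
)

type ctxKey struct{} // illustrative key for a process-wide value

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "127.0.0.1:6379"},
		asynq.Config{
			// Every Handler context is derived from the context returned here.
			BaseContext: func() context.Context {
				return context.WithValue(context.Background(), ctxKey{}, "worker-1")
			},
		},
	)
	_ = srv // register handlers and call srv.Run(...) as usual
}
```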
## [0.21.0] - 2022-01-22

### Added

- `PeriodicTaskManager` is added. Prefer this over `Scheduler`, as it has better support for dynamic periodic tasks.
- The `asynq stats` command now supports a `--json` option, making its output a JSON object.
- Introduced a new `DelayedTaskCheckInterval` configuration option. See [godoc](https://godoc.org/github.com/hibiken/asynq) for more details.
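A minimal sketch of `PeriodicTaskManager` based on the godoc; the cronspec, task type, and sync interval are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// provider is re-queried every SyncInterval, so the set of periodic
// tasks can change at runtime (unlike static Scheduler entries).
type provider struct{}

func (p *provider) GetConfigs() ([]*asynq.PeriodicTaskConfig, error) {
	return []*asynq.PeriodicTaskConfig{
		{Cronspec: "@every 1h", Task: asynq.NewTask("report:generate", nil)},
	}, nil
}

func main() {
	mgr, err := asynq.NewPeriodicTaskManager(asynq.PeriodicTaskManagerOpts{
		RedisConnOpt:               asynq.RedisClientOpt{Addr: "127.0.0.1:6379"},
		PeriodicTaskConfigProvider: &provider{},
		SyncInterval:               10 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := mgr.Run(); err != nil {
		log.Fatal(err)
	}
}
```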
## [0.20.0] - 2021-12-19

### Added

- Package `x/metrics` is added.
- Tool `tools/metrics_exporter` binary is added.
- `ProcessedTotal` and `FailedTotal` fields were added to the `QueueInfo` struct.

## [0.19.1] - 2021-12-12

### Added

- `Latency` field is added to `QueueInfo`.
- `EnqueueContext` method is added to `Client`.

### Fixed

- Fixed an error when a user passes a duration of less than 1s to the `Unique` option.
## [0.19.0] - 2021-11-06

### Changed

- `NewTask` now takes `Option`s as variadic arguments.
- Bumped the minimum supported Go version to 1.14 (i.e. go1.14 or higher is required).

### Added

- `Retention` option is added to allow users to specify a task retention duration after completion.
- `TaskID` option is added to allow users to specify a task ID.
- `ErrTaskIDConflict` sentinel error value is added.
- `ResultWriter` type is added and provided through the `Task.ResultWriter` method.
- `TaskInfo` has new fields `CompletedAt`, `Result` and `Retention`.

### Removed

- `Client.SetDefaultOptions` is removed. Pass default options to `NewTask` instead.
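A minimal sketch of the new `TaskID`, `Retention`, and `ResultWriter` APIs (the ID string and payload are illustrative):

```go
package example

import (
	"context"
	"errors"
	"time"

	"github.com/hibiken/asynq"
)

// enqueue gives the task an explicit ID and keeps it for 24h after completion.
func enqueue(client *asynq.Client, task *asynq.Task) error {
	_, err := client.Enqueue(task, asynq.TaskID("order-1234"), asynq.Retention(24*time.Hour))
	if errors.Is(err, asynq.ErrTaskIDConflict) {
		return nil // a task with this ID is already enqueued; treat as duplicate
	}
	return err
}

// handle writes a result that can be read back via TaskInfo.Result
// while the completed task is retained.
func handle(ctx context.Context, t *asynq.Task) error {
	_, err := t.ResultWriter().Write([]byte(`{"status":"ok"}`))
	return err
}
```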
## [0.18.6] - 2021-10-03

### Changed

- Updated the `github.com/go-redis/redis` package to v8.

## [0.18.5] - 2021-09-01

### Added

- `IsFailure` config option is added to determine whether an error returned from a `Handler` counts as a failure.
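A minimal sketch of the `IsFailure` hook; the sentinel error is illustrative:

```go
package example

import (
	"errors"

	"github.com/hibiken/asynq"
)

// errRateLimited marks errors that should trigger a retry without
// being counted as a failure in queue stats (illustrative sentinel).
var errRateLimited = errors.New("rate limited")

var cfg = asynq.Config{
	// Returning false retries the task without recording a failure.
	IsFailure: func(err error) bool {
		return !errors.Is(err, errRateLimited)
	},
}
```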
## [0.18.4] - 2021-08-17

### Fixed

- Scheduler methods are now thread-safe. It's now safe to call `Register` and `Unregister` concurrently.

## [0.18.3] - 2021-08-09

### Changed

- `Client.Enqueue` no longer enqueues tasks with an empty typename; an error is returned instead.

## [0.18.2] - 2021-07-15

### Changed

- Changed the `Queue` function to no longer convert the provided queue name to lowercase. Queue names are now case-sensitive.
- `QueueInfo.MemoryUsage` is now an approximate usage value.

### Fixed

- Fixed a latency issue around memory usage (see https://github.com/hibiken/asynq/issues/309).

## [0.18.1] - 2021-07-04

### Changed

- Changed the task recovering logic to execute when the server starts up; previously it needed to wait a minute before executing.

### Fixed

- Fixed the task recovering logic to execute every minute.

## [0.18.0] - 2021-06-29

### Changed

- The `NewTask` function now takes a byte slice as the payload.
- Task `Type` and `Payload` are now accessed via method calls.
- The `Server` API has changed. Renamed `Quiet` to `Stop` and `Stop` to `Shutdown`. _Note:_ As a result of this renaming, the behavior of `Stop` has changed. Please update existing code to call `Shutdown` where it used to call `Stop`.
- The `Scheduler` API has changed. Renamed `Stop` to `Shutdown`.
- Requires Redis v4.0+ for multiple field/value pair support.
- `Client.Enqueue` now returns `TaskInfo`.
- `Inspector.RunTaskByKey` is replaced with `Inspector.RunTask`.
- `Inspector.DeleteTaskByKey` is replaced with `Inspector.DeleteTask`.
- `Inspector.ArchiveTaskByKey` is replaced with `Inspector.ArchiveTask`.
- The `inspeq` package is removed. All types and functions from the package are moved to the `asynq` package.
- `WorkerInfo` field names have changed.
- `Inspector.CancelActiveTask` is renamed to `Inspector.CancelProcessing`.
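To make the `Server` rename concrete, a minimal sketch of the new call sequence:

```go
package example

import "github.com/hibiken/asynq"

// drain shows the renamed APIs: Stop (formerly Quiet) pauses task
// processing, and Shutdown (formerly Stop) shuts the server down.
func drain(srv *asynq.Server) {
	srv.Stop()     // was Quiet before v0.18.0
	srv.Shutdown() // was Stop before v0.18.0
}
```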
## [0.17.2] - 2021-06-06

### Fixed

- Free the unique lock when a task is deleted (https://github.com/hibiken/asynq/issues/275).

## [0.17.1] - 2021-04-04

### Fixed

- Fixed a bug in the internal `RDB.memoryUsage` method.

## [0.17.0] - 2021-03-24

### Added

- `DialTimeout`, `ReadTimeout`, and `WriteTimeout` options are added to `RedisConnOpt`.

## [0.16.1] - 2021-03-20

### Fixed

- Replaced the `KEYS` command with `SCAN`, as recommended by the [redis docs](https://redis.io/commands/KEYS).

## [0.16.0] - 2021-03-10

### Added

- `Unregister` method is added to `Scheduler` to remove a registered entry.
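A minimal sketch of registering and then unregistering a `Scheduler` entry, using the current byte-slice `NewTask` API shown elsewhere on this page (the cronspec and task type are illustrative):

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	scheduler := asynq.NewScheduler(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"}, nil)

	// Register returns an entry ID that can later be passed to Unregister.
	entryID, err := scheduler.Register("@every 30s", asynq.NewTask("cleanup", nil))
	if err != nil {
		log.Fatal(err)
	}
	if err := scheduler.Unregister(entryID); err != nil {
		log.Fatal(err)
	}
}
```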
## [0.15.0] - 2021-01-31

**IMPORTANT**: All `Inspector`-related code has moved to the subpackage "github.com/hibiken/asynq/inspeq".

### Changed

- `Inspector`-related code has moved to the subpackage "github.com/hibiken/asynq/inspeq".
- The `RedisConnOpt` interface has changed slightly. If you have been passing `RedisClientOpt`, `RedisFailoverClientOpt`, or `RedisClusterClientOpt` as a pointer,
  update your code to pass them as values.
- The `ErrorMsg` field in `RetryTask` and `ArchivedTask` was renamed to `LastError`.

### Added

- `MaxRetry`, `Retried`, and `LastError` fields were added to all task types returned from `Inspector`.
- `MemoryUsage` field was added to `QueueStats`.
- `DeleteAllPendingTasks` and `ArchiveAllPendingTasks` were added to `Inspector`.
- `DeleteTaskByKey` and `ArchiveTaskByKey` now support deleting/archiving `PendingTask`s.
- The asynq CLI now supports deleting/archiving pending tasks.

## [0.14.1] - 2021-01-19

### Fixed

- `go.mod` file for the CLI.

## [0.14.0] - 2021-01-14

**IMPORTANT**: Please run the `asynq migrate` command to migrate from previous versions.

### Changed

- Renamed `DeadTask` to `ArchivedTask`.
- Renamed the operation `Kill` to `Archive` in `Inspector`.
- Print a stack trace when a `Handler` panics.
- Include a file name and a line number in the error message when recovering from a panic.

### Added

- `DefaultRetryDelayFunc` is now a public API, which can be used in a custom `RetryDelayFunc`.
- `SkipRetry` error is added to be used as a return value from a `Handler`.
- `Servers` method is added to `Inspector`.
- `CancelActiveTask` method is added to `Inspector`.
- `ListSchedulerEnqueueEvents` method is added to `Inspector`.
- `SchedulerEntries` method is added to `Inspector`.
- `DeleteQueue` method is added to `Inspector`.
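A minimal sketch of returning `SkipRetry` from a handler so the task is archived instead of retried, shown with the post-v0.18 `Task.Payload()` API used later on this page (the payload shape is illustrative):

```go
package example

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/hibiken/asynq"
)

// handle wraps asynq.SkipRetry so a malformed payload is archived
// immediately rather than retried until MaxRetry is exhausted.
func handle(ctx context.Context, t *asynq.Task) error {
	var p struct{ UserID int }
	if err := json.Unmarshal(t.Payload(), &p); err != nil {
		return fmt.Errorf("malformed payload: %v: %w", err, asynq.SkipRetry)
	}
	return nil
}
```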
## [0.13.1] - 2020-11-22

### Fixed

- Fixed the processor to wait for the specified time duration before forcefully shutting down workers.

## [0.13.0] - 2020-10-13

### Added

- `Scheduler` type is added to enable periodic tasks. See the godoc for its APIs and the [wiki](https://github.com/hibiken/asynq/wiki/Periodic-Tasks) for a getting-started guide.
- `Payload.String() string` method is added.
- `Payload.MarshalJSON() ([]byte, error)` method is added.

### Changed

- The `Option` interface has changed. See the godoc for the new interface.
  This change has no impact as long as you are using exported functions (e.g. `MaxRetry`, `Queue`, etc.)
  to create `Option`s.

## [0.12.0] - 2020-09-12

**IMPORTANT**: If you are upgrading from a previous version, please install the latest version of the CLI (`go get -u github.com/hibiken/asynq/tools/asynq`) and run the `asynq migrate` command. No process should be writing to Redis while you run the migration command.

## The semantics of queue have changed

Previously, we called tasks that are ready to be processed _"Enqueued tasks"_, and other tasks that are scheduled to be processed in the future _"Scheduled tasks"_, etc.
We changed the semantics of _"Enqueue"_ slightly: all tasks that a client pushes to Redis are _Enqueued_ to a queue. Within a queue, tasks transition from one state to another.
Possible task states are:

- `Pending`: task is ready to be processed (previously called "Enqueued")
- `Active`: task is currently being processed (previously called "InProgress")
- `Scheduled`: task is scheduled to be processed in the future
- `Retry`: task failed to be processed and will be retried in the future
- `Dead`: task has exhausted all of its retries and is stored for manual inspection

**This semantics change is reflected in the new `Inspector` API and CLI commands.**

---

### Changed

#### `Client`

Use the `ProcessIn` or `ProcessAt` option to schedule a task instead of `EnqueueIn` or `EnqueueAt`.

| Previously                  | v0.12.0                                    |
| --------------------------- | ------------------------------------------ |
| `client.EnqueueAt(t, task)` | `client.Enqueue(task, asynq.ProcessAt(t))` |
| `client.EnqueueIn(d, task)` | `client.Enqueue(task, asynq.ProcessIn(d))` |
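The same migration in code, as a minimal sketch (using the current two-value `Enqueue` return):

```go
package example

import (
	"time"

	"github.com/hibiken/asynq"
)

func schedule(client *asynq.Client, task *asynq.Task) error {
	// was: client.EnqueueAt(t, task)
	if _, err := client.Enqueue(task, asynq.ProcessAt(time.Now().Add(time.Hour))); err != nil {
		return err
	}
	// was: client.EnqueueIn(d, task)
	_, err := client.Enqueue(task, asynq.ProcessIn(24*time.Hour))
	return err
}
```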
#### `Inspector`

All `Inspector` methods are scoped to a queue, and the methods take `qname (string)` as the first argument.
`EnqueuedTask` is renamed to `PendingTask`, along with its corresponding methods.
`InProgressTask` is renamed to `ActiveTask`, along with its corresponding methods.
The verb "Enqueue" is replaced by "Run" (e.g. `EnqueueAllScheduledTasks` --> `RunAllScheduledTasks`).

#### `CLI`

CLI commands are restructured to use subcommands. Commands are organized into a few management commands.
To view details on any command, use `asynq help <command> <subcommand>`.

- `asynq stats`
- `asynq queue [ls inspect history rm pause unpause]`
- `asynq task [ls cancel delete kill run delete-all kill-all run-all]`
- `asynq server [ls]`

### Added

#### `RedisConnOpt`

- `RedisClusterClientOpt` is added to connect to Redis Cluster.
- `Username` field is added to all `RedisConnOpt` types in order to authenticate connections when Redis ACLs are used.

#### `Client`

- `ProcessIn(d time.Duration) Option` and `ProcessAt(t time.Time) Option` are added to replace the `EnqueueIn` and `EnqueueAt` functionality.

#### `Inspector`

- `Queues() ([]string, error)` method is added to get all queue names.
- `ClusterKeySlot(qname string) (int64, error)` method is added to get a queue's hash slot in a Redis cluster.
- `ClusterNodes(qname string) ([]ClusterNode, error)` method is added to get a list of Redis cluster nodes for the given queue.
- `Close() error` method is added to close the connection with Redis.

#### `Handler`

- `GetQueueName(ctx context.Context) (string, bool)` helper is added to extract the queue name from a context.

## [0.11.0] - 2020-07-28

### Added

- `Inspector` type was added to monitor and mutate the state of queues and tasks.
- `HealthCheckFunc` and `HealthCheckInterval` fields were added to `Config` to allow users to specify a callback
  function to check the broker connection.
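A minimal sketch of the new health-check hooks (the interval and log message are illustrative):

```go
package example

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

var cfg = asynq.Config{
	// Called periodically with the result of a broker connection check;
	// err is non-nil when the check fails.
	HealthCheckFunc: func(err error) {
		if err != nil {
			log.Printf("asynq: broker health check failed: %v", err)
		}
	},
	HealthCheckInterval: 15 * time.Second,
}
```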
## [0.10.0] - 2020-07-06

### Changed

- All tasks now require a timeout or deadline. By default, the timeout is set to 30 mins.
- Tasks that exceed their deadline are automatically retried.
- The encoding schema for task messages has changed. Please install the latest CLI and run the `migrate` command if
  you have tasks enqueued with a previous version of asynq.
- The API of `(*Client).Enqueue`, `(*Client).EnqueueIn`, and `(*Client).EnqueueAt` has changed to return a `*Result`.
- The API of `ErrorHandler` has changed. It now takes a context as the first argument, and `retried` and `maxRetry` were removed from the argument list.
  Use `GetRetryCount` and/or `GetMaxRetry` to get those values.

## [0.9.4] - 2020-06-13

### Fixed

- Fixed an issue of the same task being processed by more than one worker (https://github.com/hibiken/asynq/issues/90).

## [0.9.3] - 2020-06-12

### Fixed

- Fixed the JSON number overflow issue (https://github.com/hibiken/asynq/issues/166).

## [0.9.2] - 2020-06-08

### Added

- The `pause` and `unpause` commands were added to the CLI. See the CLI's README for details.

## [0.9.1] - 2020-05-29

### Added
`CODE_OF_CONDUCT.md` (new file, 128 lines)

# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
ken.hibino7@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
`CONTRIBUTING.md` (modified; filename inferred from the hunk context)

```diff
@@ -45,6 +45,7 @@ Thank you! We'll try to respond as quickly as possible.
 6. Create a new pull request
 
 Please try to keep your pull request focused in scope and avoid including unrelated commits.
+Please run tests against redis cluster locally with `--redis_cluster` flag to ensure that code works for Redis cluster. TODO: Run tests using Redis cluster on CI.
 
 After you have submitted your pull request, we'll try to get back to you as soon as possible. We may suggest some changes or improvements.
```
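The `--redis_cluster` flag mentioned above is a Go test flag. A sketch of how such a flag is typically wired into a test file; the variable name and usage here are illustrative, not the repo's actual test harness:

```go
package example

import (
	"flag"
	"testing"
)

// Run as: go test --redis_cluster ./...
var useRedisCluster = flag.Bool("redis_cluster", false, "run tests against a local Redis Cluster")

func TestBroker(t *testing.T) {
	if *useRedisCluster {
		t.Log("connecting to Redis Cluster instead of a single node")
		// ... set up a cluster client here ...
	}
}
```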
`Makefile` (new file, 7 lines)

```makefile
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))

proto: internal/proto/asynq.proto
	protoc -I=$(ROOT_DIR)/internal/proto \
		--go_out=$(ROOT_DIR)/internal/proto \
		--go_opt=module=github.com/hibiken/asynq/internal/proto \
		$(ROOT_DIR)/internal/proto/asynq.proto
```
`README.md` (modified; the flattened side-by-side diff is reconstructed as a unified diff below; image URLs that the mirror dropped are left as bare alt text)

````diff
@@ -1,86 +1,114 @@
-# Asynq
+<img src="https://user-images.githubusercontent.com/11155743/114697792-ffbfa580-9d26-11eb-8e5b-33bef69476dc.png" alt="Asynq logo" width="360px" />
+
+# Simple, reliable & efficient distributed task queue in Go
 
-[![Build Status](https://travis-ci.com/hibiken/asynq.svg?token=paqzfpSkF4z5xdwNRXUx&branch=master)](https://travis-ci.com/hibiken/asynq)
-[![License: MIT](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT)
-[![Go Report Card](https://goreportcard.com/badge/github.com/hibiken/asynq)](https://goreportcard.com/report/github.com/hibiken/asynq)
 [![GoDoc](https://godoc.org/github.com/hibiken/asynq?status.svg)](https://godoc.org/github.com/hibiken/asynq)
+[![Go Report Card](https://goreportcard.com/badge/github.com/hibiken/asynq)](https://goreportcard.com/report/github.com/hibiken/asynq)
+![Build Status]
+[![License: MIT](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT)
 [![Gitter chat](https://badges.gitter.im/go-asynq/gitter.svg)](https://gitter.im/go-asynq/community)
-[![codecov](https://codecov.io/gh/hibiken/asynq/branch/master/graph/badge.svg)](https://codecov.io/gh/hibiken/asynq)
 
-## Overview
+Asynq is a Go library for queueing tasks and processing them asynchronously with workers. It's backed by [Redis](https://redis.io/) and is designed to be scalable yet easy to get started.
 
-Asynq is a Go library for queueing tasks and processing them in the background with workers. It is backed by Redis and it is designed to have a low barrier to entry. It should be integrated in your web stack easily.
-
 Highlevel overview of how Asynq works:
 
-- Client puts task on a queue
-- Server pulls task off queues and starts a worker goroutine for each task
+- Client puts tasks on a queue
+- Server pulls tasks off queues and starts a worker goroutine for each task
 - Tasks are processed concurrently by multiple workers
 
-Task queues are used as a mechanism to distribute work across multiple machines.
-A system can consist of multiple worker servers and brokers, giving way to high availability and horizontal scaling.
+Task queues are used as a mechanism to distribute work across multiple machines. A system can consist of multiple worker servers and brokers, giving way to high availability and horizontal scaling.
 
-![Task queue diagram]
+**Example use case**
 
-## Stability and Compatibility
+![Task queue diagram]
 
-**Important Note**: Current major version is zero (v0.x.x) to accomodate rapid development and fast iteration while getting early feedback from users (Feedback on APIs are appreciated!). The public API could change without a major version update before v1.0.0 release.
-
-**Status**: The library is currently undergoing heavy development with frequent, breaking API changes.
-
 ## Features
 
 - Guaranteed [at least one execution](https://www.cloudcomputingpatterns.org/at_least_once_delivery/) of a task
 - Scheduling of tasks
-- Durability since tasks are written to Redis
 - [Retries](https://github.com/hibiken/asynq/wiki/Task-Retry) of failed tasks
-- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#weighted-priority-queues)
-- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#strict-priority-queues)
+- Automatic recovery of tasks in the event of a worker crash
+- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#weighted-priority)
+- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#strict-priority)
 - Low latency to add a task since writes are fast in Redis
 - De-duplication of tasks using [unique option](https://github.com/hibiken/asynq/wiki/Unique-Tasks)
 - Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)
 - [Flexible handler interface with support for middlewares](https://github.com/hibiken/asynq/wiki/Handler-Deep-Dive)
-- [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for HA
+- [Ability to pause queue](/tools/asynq/README.md#pause) to stop processing tasks from the queue
+- [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks)
+- [Support Redis Cluster](https://github.com/hibiken/asynq/wiki/Redis-Cluster) for automatic sharding and high availability
+- [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for high availability
+- Integration with [Prometheus](https://prometheus.io/) to collect and visualize queue metrics
+- [Web UI](#web-ui) to inspect and remote-control queues and tasks
 - [CLI](#command-line-tool) to inspect and remote-control queues and tasks
+
+## Stability and Compatibility
+
+**Status**: The library is currently undergoing **heavy development** with frequent, breaking API changes.
+
+> ☝️ **Important Note**: Current major version is zero (`v0.x.x`) to accommodate rapid development and fast iteration while getting early feedback from users (_feedback on APIs is appreciated!_). The public API could change without a major version update before the `v1.0.0` release.
````
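The feature list above mentions task de-duplication via the unique option. A minimal sketch of what that looks like in client code (the TTL is illustrative):

```go
package example

import (
	"errors"
	"time"

	"github.com/hibiken/asynq"
)

// enqueueUnique drops duplicate enqueues of an identical task for 24 hours.
func enqueueUnique(client *asynq.Client, task *asynq.Task) error {
	_, err := client.Enqueue(task, asynq.Unique(24*time.Hour))
	if errors.Is(err, asynq.ErrDuplicateTask) {
		return nil // an identical task is already queued; skip
	}
	return err
}
```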
````diff
 ## Quickstart
 
-First, make sure you are running a Redis server locally.
+Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.14` or higher is required.
+
+Initialize your project by creating a folder and then running `go mod init github.com/your/repo` ([learn more](https://blog.golang.org/using-go-modules)) inside the folder. Then install Asynq library with the [`go get`](https://golang.org/cmd/go/#hdr-Add_dependencies_to_current_module_and_install_them) command:
 
 ```sh
-$ redis-server
+go get -u github.com/hibiken/asynq
 ```
 
+Make sure you're running a Redis server locally or from a [Docker](https://hub.docker.com/_/redis) container. Version `4.0` or higher is required.
+
 Next, write a package that encapsulates task creation and task handling.
 
 ```go
 package tasks
 
 import (
+    "context"
+    "encoding/json"
     "fmt"
+    "log"
+    "time"
     "github.com/hibiken/asynq"
 )
 
 // A list of task types.
 const (
-    EmailDelivery   = "email:deliver"
-    ImageProcessing = "image:process"
+    TypeEmailDelivery = "email:deliver"
+    TypeImageResize   = "image:resize"
 )
 
+type EmailDeliveryPayload struct {
+    UserID     int
+    TemplateID string
+}
+
+type ImageResizePayload struct {
+    SourceURL string
+}
+
 //----------------------------------------------
 // Write a function NewXXXTask to create a task.
 // A task consists of a type and a payload.
 //----------------------------------------------
 
-func NewEmailDeliveryTask(userID int, tmplID string) *asynq.Task {
-    payload := map[string]interface{}{"user_id": userID, "template_id": tmplID}
-    return asynq.NewTask(EmailDelivery, payload)
+func NewEmailDeliveryTask(userID int, tmplID string) (*asynq.Task, error) {
+    payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: tmplID})
+    if err != nil {
+        return nil, err
+    }
+    return asynq.NewTask(TypeEmailDelivery, payload), nil
 }
 
-func NewImageProcessingTask(src, dst string) *asynq.Task {
-    payload := map[string]interface{}{"src": src, "dst": dst}
-    return asynq.NewTask(ImageProcessing, payload)
+func NewImageResizeTask(src string) (*asynq.Task, error) {
+    payload, err := json.Marshal(ImageResizePayload{SourceURL: src})
+    if err != nil {
+        return nil, err
+    }
+    // task options can be passed to NewTask, which can be overridden at enqueue time.
+    return asynq.NewTask(TypeImageResize, payload, asynq.MaxRetry(5), asynq.Timeout(20 * time.Minute)), nil
 }
 
 //---------------------------------------------------------------
````
````diff
@@ -92,51 +120,42 @@ func NewImageProcessingTask(src, dst string) *asynq.Task {
 //---------------------------------------------------------------
 
 func HandleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error {
-    userID, err := t.Payload.GetInt("user_id")
-    if err != nil {
-        return err
+    var p EmailDeliveryPayload
+    if err := json.Unmarshal(t.Payload(), &p); err != nil {
+        return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
     }
-    tmplID, err := t.Payload.GetString("template_id")
-    if err != nil {
-        return err
-    }
-    fmt.Printf("Send Email to User: user_id = %d, template_id = %s\n", userID, tmplID)
-    // Email delivery logic ...
+    log.Printf("Sending Email to User: user_id=%d, template_id=%s", p.UserID, p.TemplateID)
+    // Email delivery code ...
     return nil
 }
 
 // ImageProcessor implements asynq.Handler interface.
-type ImageProcesser struct {
+type ImageProcessor struct {
     // ... fields for struct
 }
 
-func (p *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
-    src, err := t.Payload.GetString("src")
-    if err != nil {
-        return err
+func (processor *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
+    var p ImageResizePayload
+    if err := json.Unmarshal(t.Payload(), &p); err != nil {
+        return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
     }
-    dst, err := t.Payload.GetString("dst")
-    if err != nil {
-        return err
-    }
-    fmt.Printf("Process image: src = %s, dst = %s\n", src, dst)
-    // Image processing logic ...
+    log.Printf("Resizing image: src=%s", p.SourceURL)
+    // Image resizing code ...
     return nil
 }
 
 func NewImageProcessor() *ImageProcessor {
-    // ... return an instance
+    return &ImageProcessor{}
 }
 ```
 
-In your web application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on the queue.
-A task will be processed asynchronously by a background worker as soon as the task gets enqueued.
-Scheduled tasks will be stored in Redis and will be enqueued at the specified time.
+In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on queues.
 
 ```go
 package main
 
 import (
+    "log"
     "time"
 
     "github.com/hibiken/asynq"
````
````diff
@@ -146,64 +165,57 @@ import (
 const redisAddr = "127.0.0.1:6379"
 
 func main() {
-    r := asynq.RedisClientOpt{Addr: redisAddr}
-    c := asynq.NewClient(r)
-    defer c.Close()
+    client := asynq.NewClient(asynq.RedisClientOpt{Addr: redisAddr})
+    defer client.Close()
 
     // ------------------------------------------------------
     // Example 1: Enqueue task to be processed immediately.
     //            Use (*Client).Enqueue method.
     // ------------------------------------------------------
 
-    t := tasks.NewEmailDeliveryTask(42, "some:template:id")
-    err := c.Enqueue(t)
+    task, err := tasks.NewEmailDeliveryTask(42, "some:template:id")
     if err != nil {
-        log.Fatal("could not enqueue task: %v", err)
+        log.Fatalf("could not create task: %v", err)
     }
+    info, err := client.Enqueue(task)
+    if err != nil {
+        log.Fatalf("could not enqueue task: %v", err)
+    }
+    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 
 
     // ------------------------------------------------------------
     // Example 2: Schedule task to be processed in the future.
-    //            Use (*Client).EnqueueIn or (*Client).EnqueueAt.
+    //            Use ProcessIn or ProcessAt option.
     // ------------------------------------------------------------
 
-    t = tasks.NewEmailDeliveryTask(42, "other:template:id")
-    err = c.EnqueueIn(24*time.Hour, t)
+    info, err = client.Enqueue(task, asynq.ProcessIn(24*time.Hour))
     if err != nil {
-        log.Fatal("could not schedule task: %v", err)
+        log.Fatalf("could not schedule task: %v", err)
     }
+    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 
 
     // ----------------------------------------------------------------------------
-    // Example 3: Set options to tune task processing behavior.
+    // Example 3: Set other options to tune task processing behavior.
     //            Options include MaxRetry, Queue, Timeout, Deadline, Unique etc.
     // ----------------------------------------------------------------------------
 
-    c.SetDefaultOptions(tasks.ImageProcessing, asynq.MaxRetry(10), asynq.Timeout(time.Minute))
-
-    t = tasks.NewImageProcessingTask("some/blobstore/url", "other/blobstore/url")
-    err = c.Enqueue(t)
+    task, err = tasks.NewImageResizeTask("https://example.com/myassets/image.jpg")
     if err != nil {
-        log.Fatal("could not enqueue task: %v", err)
+        log.Fatalf("could not create task: %v", err)
     }
-
-    // ---------------------------------------------------------------------------
-    // Example 4: Pass options to tune task processing behavior at enqueue time.
-    //            Options passed at enqueue time override default ones, if any.
-    // ---------------------------------------------------------------------------
-
-    t = tasks.NewImageProcessingTask("some/blobstore/url", "other/blobstore/url")
-    err = c.Enqueue(t, asynq.Queue("critical"), asynq.Timeout(30*time.Second))
+    info, err = client.Enqueue(task, asynq.MaxRetry(10), asynq.Timeout(3 * time.Minute))
     if err != nil {
-        log.Fatal("could not enqueue task: %v", err)
+        log.Fatalf("could not enqueue task: %v", err)
     }
+    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 }
 ```
 
-Next, create a worker server to process these tasks in the background.
-To start the background workers, use [`Server`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Server) and provide your [`Handler`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Handler) to process the tasks.
+Next, start a worker server to process these tasks in the background. To start the background workers, use [`Server`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Server) and provide your [`Handler`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Handler) to process the tasks.
 
-You can optionally use [`ServeMux`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#ServeMux) to create a handler, just as you would with [`"net/http"`](https://golang.org/pkg/net/http/) Handler.
+You can optionally use [`ServeMux`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#ServeMux) to create a handler, just as you would with [`net/http`](https://golang.org/pkg/net/http/) Handler.
 
 ```go
 package main
````
@@ -218,9 +230,9 @@ import (
|
|||||||
const redisAddr = "127.0.0.1:6379"
|
const redisAddr = "127.0.0.1:6379"
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
r := asynq.RedisClientOpt{Addr: redisAddr}
|
srv := asynq.NewServer(
|
||||||
|
asynq.RedisClientOpt{Addr: redisAddr},
|
||||||
srv := asynq.NewServer(r, asynq.Config{
|
asynq.Config{
|
||||||
// Specify how many concurrent workers to use
|
// Specify how many concurrent workers to use
|
||||||
Concurrency: 10,
|
Concurrency: 10,
|
||||||
// Optionally specify multiple queues with different priority.
|
// Optionally specify multiple queues with different priority.
|
||||||
@@ -230,12 +242,13 @@ func main() {
|
|||||||
"low": 1,
|
"low": 1,
|
||||||
},
|
},
|
||||||
// See the godoc for other configuration options
|
// See the godoc for other configuration options
|
||||||
})
|
},
|
||||||
|
)
|
||||||
|
|
||||||
// mux maps a type to a handler
|
// mux maps a type to a handler
|
||||||
mux := asynq.NewServeMux()
|
mux := asynq.NewServeMux()
|
||||||
mux.HandleFunc(tasks.EmailDelivery, tasks.HandleEmailDeliveryTask)
|
mux.HandleFunc(tasks.TypeEmailDelivery, tasks.HandleEmailDeliveryTask)
|
||||||
mux.Handle(tasks.ImageProcessing, tasks.NewImageProcessor())
|
mux.Handle(tasks.TypeImageResize, tasks.NewImageProcessor())
|
||||||
// ...register other handlers...
|
// ...register other handlers...
|
||||||
|
|
||||||
if err := srv.Run(mux); err != nil {
|
if err := srv.Run(mux); err != nil {
|
||||||
@@ -244,52 +257,55 @@ func main() {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
 
-For a more detailed walk-through of the library, see our [Getting Started Guide](https://github.com/hibiken/asynq/wiki/Getting-Started).
+For a more detailed walk-through of the library, see our [Getting Started](https://github.com/hibiken/asynq/wiki/Getting-Started) guide.
 
-To Learn more about `asynq` features and APIs, see our [Wiki](https://github.com/hibiken/asynq/wiki) and [godoc](https://godoc.org/github.com/hibiken/asynq).
+To learn more about `asynq` features and APIs, see the package [godoc](https://godoc.org/github.com/hibiken/asynq).
 
+## Web UI
+
+[Asynqmon](https://github.com/hibiken/asynqmon) is a web-based tool for monitoring and administrating Asynq queues and tasks.
+
+Here are a few screenshots of the Web UI:
+
+**Queues view**
+
+![Web UI Queues View](https://user-images.githubusercontent.com/11155743/114697016-07327f00-9d26-11eb-808c-0ac841dc888e.png)
+
+**Tasks view**
+
+![Web UI TasksView](https://user-images.githubusercontent.com/11155743/114697070-1f0a0300-9d26-11eb-855c-d3ec263865b7.png)
+
+**Metrics view**
+
+<img width="1532" alt="Screen Shot 2021-12-19 at 4 37 19 PM" src="https://user-images.githubusercontent.com/10953044/146777420-cae6c476-bac6-469c-acce-b2f6584e8707.png">
+
+**Settings and adaptive dark mode**
+
+![Web UI Settings and adaptive dark mode](https://user-images.githubusercontent.com/11155743/114697149-3517c380-9d26-11eb-9f7a-ae2dd00aad5b.png)
+
+For details on how to use the tool, refer to the tool's [README](https://github.com/hibiken/asynqmon#readme).
 
 ## Command Line Tool
 
 Asynq ships with a command line tool to inspect the state of queues and tasks.
 
-Here's an example of running the `stats` command.
-
-![Gif](/docs/assets/demo.gif)
-
-For details on how to use the tool, refer to the tool's [README](/tools/asynq/README.md).
-
-## Installation
-
-To install `asynq` library, run the following command:
-
-```sh
-go get -u github.com/hibiken/asynq
-```
-
 To install the CLI tool, run the following command:
 
 ```sh
 go get -u github.com/hibiken/asynq/tools/asynq
 ```
 
-## Requirements
-
-| Dependency                 | Version |
-| -------------------------- | ------- |
-| [Redis](https://redis.io/) | v2.8+   |
-| [Go](https://golang.org/)  | v1.13+  |
+Here's an example of running the `asynq stats` command:
+
+![Gif](/docs/assets/demo.gif)
+
+For details on how to use the tool, refer to the tool's [README](/tools/asynq/README.md).
 
 ## Contributing
 
-We are open to, and grateful for, any contributions (Github issues/pull-requests, feedback on Gitter channel, etc) made by the community.
+We are open to, and grateful for, any contributions (GitHub issues/PRs, feedback on [Gitter channel](https://gitter.im/go-asynq/community), etc) made by the community.
 
 Please see the [Contribution Guide](/CONTRIBUTING.md) before contributing.
 
-## Acknowledgements
-
-- [Sidekiq](https://github.com/mperham/sidekiq) : Many of the design ideas are taken from sidekiq and its Web UI
-- [RQ](https://github.com/rq/rq) : Client APIs are inspired by rq library.
-- [Cobra](https://github.com/spf13/cobra) : Asynq CLI is built with cobra
-
 ## License
 
-Asynq is released under the MIT license. See [LICENSE](https://github.com/hibiken/asynq/blob/master/LICENSE).
+Copyright (c) 2019-present [Ken Hibino](https://github.com/hibiken) and [Contributors](https://github.com/hibiken/asynq/graphs/contributors). `Asynq` is free and open-source software licensed under the [MIT License](https://github.com/hibiken/asynq/blob/master/LICENSE). Official logo was created by [Vic Shóstak](https://github.com/koddr) and distributed under [Creative Commons](https://creativecommons.org/publicdomain/zero/1.0/) license (CC0 1.0 Universal).

asynq.go (411 changed lines)
@@ -5,40 +5,221 @@
 package asynq
 
 import (
+    "context"
     "crypto/tls"
     "fmt"
     "net/url"
     "strconv"
     "strings"
+    "time"
 
-    "github.com/go-redis/redis/v7"
+    "github.com/go-redis/redis/v8"
+    "github.com/hibiken/asynq/internal/base"
 )
 
 // Task represents a unit of work to be performed.
 type Task struct {
-    // Type indicates the type of task to be performed.
-    Type string
+    // typename indicates the type of task to be performed.
+    typename string
 
-    // Payload holds data needed to perform the task.
-    Payload Payload
+    // payload holds data needed to perform the task.
+    payload []byte
+
+    // opts holds options for the task.
+    opts []Option
+
+    // w is the ResultWriter for the task.
+    w *ResultWriter
 }
 
+func (t *Task) Type() string    { return t.typename }
+func (t *Task) Payload() []byte { return t.payload }
+
+// ResultWriter returns a pointer to the ResultWriter associated with the task.
+//
+// Nil pointer is returned if called on a newly created task (i.e. task created by calling NewTask).
+// Only the tasks passed to Handler.ProcessTask have a valid ResultWriter pointer.
+func (t *Task) ResultWriter() *ResultWriter { return t.w }
+
 // NewTask returns a new Task given a type name and payload data.
-//
-// The payload values must be serializable.
-func NewTask(typename string, payload map[string]interface{}) *Task {
+// Options can be passed to configure task processing behavior.
+func NewTask(typename string, payload []byte, opts ...Option) *Task {
     return &Task{
-        Type:    typename,
-        Payload: Payload{payload},
+        typename: typename,
+        payload:  payload,
+        opts:     opts,
     }
 }
+
+// newTask creates a task with the given typename, payload and ResultWriter.
+func newTask(typename string, payload []byte, w *ResultWriter) *Task {
+    return &Task{
+        typename: typename,
+        payload:  payload,
+        w:        w,
+    }
+}
+
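With this change the payload is raw bytes rather than a `map[string]interface{}`, so callers JSON-encode their data themselves and read it back through the new accessor methods. A minimal sketch of the new constructor in use (the task type name and payload fields below are placeholders, not taken from this changeset):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	// JSON-encode the payload by hand; NewTask now takes []byte.
	payload, err := json.Marshal(map[string]interface{}{"user_id": 42})
	if err != nil {
		log.Fatal(err)
	}
	// Options attached here can still be overridden at enqueue time.
	task := asynq.NewTask("email:deliver", payload,
		asynq.MaxRetry(5), asynq.Timeout(2*time.Minute))

	// The struct fields are now unexported; use the accessors instead.
	fmt.Println(task.Type())            // email:deliver
	fmt.Println(string(task.Payload())) // {"user_id":42}
}
```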
+// A TaskInfo describes a task and its metadata.
+type TaskInfo struct {
+    // ID is the identifier of the task.
+    ID string
+
+    // Queue is the name of the queue in which the task belongs.
+    Queue string
+
+    // Type is the type name of the task.
+    Type string
+
+    // Payload is the payload data of the task.
+    Payload []byte
+
+    // State indicates the task state.
+    State TaskState
+
+    // MaxRetry is the maximum number of times the task can be retried.
+    MaxRetry int
+
+    // Retried is the number of times the task has been retried so far.
+    Retried int
+
+    // LastErr is the error message from the last failure.
+    LastErr string
+
+    // LastFailedAt is the time of the last failure, if any.
+    // If the task has no failures, LastFailedAt is zero time (i.e. time.Time{}).
+    LastFailedAt time.Time
+
+    // Timeout is the duration the task can be processed by Handler before being retried,
+    // zero if not specified.
+    Timeout time.Duration
+
+    // Deadline is the deadline for the task, zero value if not specified.
+    Deadline time.Time
+
+    // NextProcessAt is the time the task is scheduled to be processed,
+    // zero if not applicable.
+    NextProcessAt time.Time
+
+    // IsOrphaned describes whether the task is left in active state with no worker processing it.
+    // An orphaned task indicates that the worker has crashed or experienced network failures and was not able to
+    // extend its lease on the task.
+    //
+    // This task will be recovered by running a server against the queue the task is in.
+    // This field is only applicable to tasks with TaskStateActive.
+    IsOrphaned bool
+
+    // Retention is the duration of the retention period after the task is successfully processed.
+    Retention time.Duration
+
+    // CompletedAt is the time when the task was processed successfully.
+    // Zero value (i.e. time.Time{}) indicates no value.
+    CompletedAt time.Time
+
+    // Result holds the result data associated with the task.
+    // Use ResultWriter to write result data from the Handler.
+    Result []byte
+}
+
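Since the enqueue path now returns this struct, callers can log or branch on task metadata immediately. A rough usage sketch, assuming a locally running Redis and a placeholder task type:

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("image:resize", []byte(`{"src":"some/blobstore/url"}`))
	info, err := client.Enqueue(task)
	if err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	// State stringifies via TaskState.String (e.g. "pending").
	log.Printf("id=%s queue=%s state=%s max_retry=%d",
		info.ID, info.Queue, info.State, info.MaxRetry)
}
```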
+// If t is non-zero, returns time converted from t as unix time in seconds.
+// If t is zero, returns zero value of time.Time.
+func fromUnixTimeOrZero(t int64) time.Time {
+    if t == 0 {
+        return time.Time{}
+    }
+    return time.Unix(t, 0)
+}
+
+func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time, result []byte) *TaskInfo {
+    info := TaskInfo{
+        ID:            msg.ID,
+        Queue:         msg.Queue,
+        Type:          msg.Type,
+        Payload:       msg.Payload, // Do we need to make a copy?
+        MaxRetry:      msg.Retry,
+        Retried:       msg.Retried,
+        LastErr:       msg.ErrorMsg,
+        Timeout:       time.Duration(msg.Timeout) * time.Second,
+        Deadline:      fromUnixTimeOrZero(msg.Deadline),
+        Retention:     time.Duration(msg.Retention) * time.Second,
+        NextProcessAt: nextProcessAt,
+        LastFailedAt:  fromUnixTimeOrZero(msg.LastFailedAt),
+        CompletedAt:   fromUnixTimeOrZero(msg.CompletedAt),
+        Result:        result,
+    }
+
+    switch state {
+    case base.TaskStateActive:
+        info.State = TaskStateActive
+    case base.TaskStatePending:
+        info.State = TaskStatePending
+    case base.TaskStateScheduled:
+        info.State = TaskStateScheduled
+    case base.TaskStateRetry:
+        info.State = TaskStateRetry
+    case base.TaskStateArchived:
+        info.State = TaskStateArchived
+    case base.TaskStateCompleted:
+        info.State = TaskStateCompleted
+    default:
+        panic(fmt.Sprintf("internal error: unknown state: %d", state))
+    }
+    return &info
+}
+
+// TaskState denotes the state of a task.
+type TaskState int
+
+const (
+    // Indicates that the task is currently being processed by Handler.
+    TaskStateActive TaskState = iota + 1
+
+    // Indicates that the task is ready to be processed by Handler.
+    TaskStatePending
+
+    // Indicates that the task is scheduled to be processed some time in the future.
+    TaskStateScheduled
+
+    // Indicates that the task has previously failed and is scheduled to be processed some time in the future.
+    TaskStateRetry
+
+    // Indicates that the task is archived and stored for inspection purposes.
+    TaskStateArchived
+
+    // Indicates that the task is processed successfully and retained until the retention TTL expires.
+    TaskStateCompleted
+)
+
+func (s TaskState) String() string {
+    switch s {
+    case TaskStateActive:
+        return "active"
+    case TaskStatePending:
+        return "pending"
+    case TaskStateScheduled:
+        return "scheduled"
+    case TaskStateRetry:
+        return "retry"
+    case TaskStateArchived:
+        return "archived"
+    case TaskStateCompleted:
+        return "completed"
+    }
+    panic("asynq: unknown task state")
+}
+
 // RedisConnOpt is a discriminated union of types that represent Redis connection configuration option.
 //
 // RedisConnOpt represents a sum of following types:
 //
-//     RedisClientOpt | *RedisClientOpt | RedisFailoverClientOpt | *RedisFailoverClientOpt
-type RedisConnOpt interface{}
+// - RedisClientOpt
+// - RedisFailoverClientOpt
+// - RedisClusterClientOpt
+type RedisConnOpt interface {
+    // MakeRedisClient returns a new redis client instance.
+    // Return value is intentionally opaque to hide the implementation detail of redis client.
+    MakeRedisClient() interface{}
+}
 
 // RedisClientOpt is used to create a redis client that connects
 // to a redis server directly.
@@ -50,13 +231,38 @@ type RedisClientOpt struct {
     // Redis server address in "host:port" format.
     Addr string
 
-    // Redis server password.
+    // Username to authenticate the current connection when Redis ACLs are used.
+    // See: https://redis.io/commands/auth.
+    Username string
+
+    // Password to authenticate the current connection.
+    // See: https://redis.io/commands/auth.
     Password string
 
     // Redis DB to select after connecting to a server.
     // See: https://redis.io/commands/select.
     DB int
 
+    // Dial timeout for establishing new connections.
+    // Default is 5 seconds.
+    DialTimeout time.Duration
+
+    // Timeout for socket reads.
+    // If timeout is reached, read commands will fail with a timeout error
+    // instead of blocking.
+    //
+    // Use value -1 for no timeout and 0 for default.
+    // Default is 3 seconds.
+    ReadTimeout time.Duration
+
+    // Timeout for socket writes.
+    // If timeout is reached, write commands will fail with a timeout error
+    // instead of blocking.
+    //
+    // Use value -1 for no timeout and 0 for default.
+    // Default is ReadTimeout.
+    WriteTimeout time.Duration
+
     // Maximum number of socket connections.
     // Default is 10 connections per every CPU as reported by runtime.NumCPU.
     PoolSize int
@@ -66,6 +272,21 @@ type RedisClientOpt struct {
     TLSConfig *tls.Config
 }
 
+func (opt RedisClientOpt) MakeRedisClient() interface{} {
+    return redis.NewClient(&redis.Options{
+        Network:      opt.Network,
+        Addr:         opt.Addr,
+        Username:     opt.Username,
+        Password:     opt.Password,
+        DB:           opt.DB,
+        DialTimeout:  opt.DialTimeout,
+        ReadTimeout:  opt.ReadTimeout,
+        WriteTimeout: opt.WriteTimeout,
+        PoolSize:     opt.PoolSize,
+        TLSConfig:    opt.TLSConfig,
+    })
+}
+
 // RedisFailoverClientOpt is used to create a redis client that talks
 // to redis sentinels for service discovery and has an automatic failover
 // capability.
@@ -81,13 +302,38 @@ type RedisFailoverClientOpt struct {
     // Redis sentinel password.
     SentinelPassword string
 
-    // Redis server password.
+    // Username to authenticate the current connection when Redis ACLs are used.
+    // See: https://redis.io/commands/auth.
+    Username string
+
+    // Password to authenticate the current connection.
+    // See: https://redis.io/commands/auth.
     Password string
 
     // Redis DB to select after connecting to a server.
     // See: https://redis.io/commands/select.
     DB int
 
+    // Dial timeout for establishing new connections.
+    // Default is 5 seconds.
+    DialTimeout time.Duration
+
+    // Timeout for socket reads.
+    // If timeout is reached, read commands will fail with a timeout error
+    // instead of blocking.
+    //
+    // Use value -1 for no timeout and 0 for default.
+    // Default is 3 seconds.
+    ReadTimeout time.Duration
+
+    // Timeout for socket writes.
+    // If timeout is reached, write commands will fail with a timeout error
+    // instead of blocking.
+    //
+    // Use value -1 for no timeout and 0 for default.
+    // Default is ReadTimeout.
+    WriteTimeout time.Duration
+
     // Maximum number of socket connections.
     // Default is 10 connections per every CPU as reported by runtime.NumCPU.
     PoolSize int
@@ -97,6 +343,79 @@ type RedisFailoverClientOpt struct {
     TLSConfig *tls.Config
 }
 
+func (opt RedisFailoverClientOpt) MakeRedisClient() interface{} {
+    return redis.NewFailoverClient(&redis.FailoverOptions{
+        MasterName:       opt.MasterName,
+        SentinelAddrs:    opt.SentinelAddrs,
+        SentinelPassword: opt.SentinelPassword,
+        Username:         opt.Username,
+        Password:         opt.Password,
+        DB:               opt.DB,
+        DialTimeout:      opt.DialTimeout,
+        ReadTimeout:      opt.ReadTimeout,
+        WriteTimeout:     opt.WriteTimeout,
+        PoolSize:         opt.PoolSize,
+        TLSConfig:        opt.TLSConfig,
+    })
+}
+
+// RedisClusterClientOpt is used to create a redis client that connects to
+// redis cluster.
+type RedisClusterClientOpt struct {
+    // A seed list of host:port addresses of cluster nodes.
+    Addrs []string
+
+    // The maximum number of retries before giving up.
+    // Command is retried on network errors and MOVED/ASK redirects.
+    // Default is 8 retries.
+    MaxRedirects int
+
+    // Username to authenticate the current connection when Redis ACLs are used.
+    // See: https://redis.io/commands/auth.
+    Username string
+
+    // Password to authenticate the current connection.
+    // See: https://redis.io/commands/auth.
+    Password string
+
+    // Dial timeout for establishing new connections.
+    // Default is 5 seconds.
+    DialTimeout time.Duration
+
+    // Timeout for socket reads.
+    // If timeout is reached, read commands will fail with a timeout error
+    // instead of blocking.
+    //
+    // Use value -1 for no timeout and 0 for default.
+    // Default is 3 seconds.
+    ReadTimeout time.Duration
+
+    // Timeout for socket writes.
+    // If timeout is reached, write commands will fail with a timeout error
+    // instead of blocking.
+    //
+    // Use value -1 for no timeout and 0 for default.
+    // Default is ReadTimeout.
+    WriteTimeout time.Duration
+
+    // TLS Config used to connect to a server.
+    // TLS will be negotiated only if this field is set.
+    TLSConfig *tls.Config
+}
+
+func (opt RedisClusterClientOpt) MakeRedisClient() interface{} {
+    return redis.NewClusterClient(&redis.ClusterOptions{
+        Addrs:        opt.Addrs,
+        MaxRedirects: opt.MaxRedirects,
+        Username:     opt.Username,
+        Password:     opt.Password,
+        DialTimeout:  opt.DialTimeout,
+        ReadTimeout:  opt.ReadTimeout,
+        WriteTimeout: opt.WriteTimeout,
+        TLSConfig:    opt.TLSConfig,
+    })
+}
+
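Because each connection option now implements `MakeRedisClient`, the same `NewClient`/`NewServer` call sites work across single-node, sentinel, and cluster deployments. A sketch of the three variants (all addresses and names below are placeholders):

```go
package main

import (
	"github.com/hibiken/asynq"
)

func main() {
	// Single-node Redis.
	single := asynq.RedisClientOpt{Addr: "localhost:6379", DB: 2}

	// Sentinel-managed failover setup.
	failover := asynq.RedisFailoverClientOpt{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"localhost:5000", "localhost:5001"},
	}

	// Redis Cluster.
	cluster := asynq.RedisClusterClientOpt{
		Addrs: []string{"localhost:7000", "localhost:7001", "localhost:7002"},
	}

	// Any of the three satisfies RedisConnOpt; normally you pass it to
	// asynq.NewClient or asynq.NewServer rather than calling this directly.
	for _, opt := range []asynq.RedisConnOpt{single, failover, cluster} {
		_ = opt.MakeRedisClient() // returns an intentionally opaque client
	}
}
```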
 // ParseRedisURI parses redis uri string and returns RedisConnOpt if uri is valid.
 // It returns a non-nil error if uri cannot be parsed.
 //
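A short usage sketch of the parser; the `redis://[:password@]host:port/db` URI shape is an assumption about the accepted format, and the credentials and DB number are placeholders:

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	// Build a connection option from a URI instead of filling in a struct.
	opt, err := asynq.ParseRedisURI("redis://:mypassword@localhost:6379/2")
	if err != nil {
		log.Fatalf("could not parse redis uri: %v", err)
	}
	client := asynq.NewClient(opt)
	defer client.Close()
}
```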
@@ -170,50 +489,26 @@ func parseRedisSentinelURI(u *url.URL) (RedisConnOpt, error) {
     return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, Password: password}, nil
 }
 
-// createRedisClient returns a redis client given a redis connection configuration.
-//
-// Passing an unexpected type as a RedisConnOpt argument will cause panic.
-func createRedisClient(r RedisConnOpt) *redis.Client {
-    switch r := r.(type) {
-    case *RedisClientOpt:
-        return redis.NewClient(&redis.Options{
-            Network:   r.Network,
-            Addr:      r.Addr,
-            Password:  r.Password,
-            DB:        r.DB,
-            PoolSize:  r.PoolSize,
-            TLSConfig: r.TLSConfig,
-        })
-    case RedisClientOpt:
-        return redis.NewClient(&redis.Options{
-            Network:   r.Network,
-            Addr:      r.Addr,
-            Password:  r.Password,
-            DB:        r.DB,
-            PoolSize:  r.PoolSize,
-            TLSConfig: r.TLSConfig,
-        })
-    case *RedisFailoverClientOpt:
-        return redis.NewFailoverClient(&redis.FailoverOptions{
-            MasterName:       r.MasterName,
-            SentinelAddrs:    r.SentinelAddrs,
-            SentinelPassword: r.SentinelPassword,
-            Password:         r.Password,
-            DB:               r.DB,
-            PoolSize:         r.PoolSize,
-            TLSConfig:        r.TLSConfig,
-        })
-    case RedisFailoverClientOpt:
-        return redis.NewFailoverClient(&redis.FailoverOptions{
-            MasterName:       r.MasterName,
-            SentinelAddrs:    r.SentinelAddrs,
-            SentinelPassword: r.SentinelPassword,
-            Password:         r.Password,
-            DB:               r.DB,
-            PoolSize:         r.PoolSize,
-            TLSConfig:        r.TLSConfig,
-        })
-    default:
-        panic(fmt.Sprintf("asynq: unexpected type %T for RedisConnOpt", r))
-    }
-}
+// ResultWriter is a client interface to write result data for a task.
+// It writes the data to the redis instance the server is connected to.
+type ResultWriter struct {
+    id     string // task ID this writer is responsible for
+    qname  string // queue name the task belongs to
+    broker base.Broker
+    ctx    context.Context // context associated with the task
+}
+
+// Write writes the given data as a result of the task the ResultWriter is associated with.
+func (w *ResultWriter) Write(data []byte) (n int, err error) {
+    select {
+    case <-w.ctx.Done():
+        return 0, fmt.Errorf("failed to write task result: %v", w.ctx.Err())
+    default:
+    }
+    return w.broker.WriteResult(w.qname, w.id, data)
+}
+
+// TaskID returns the ID of the task the ResultWriter is associated with.
+func (w *ResultWriter) TaskID() string {
+    return w.id
+}
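A handler-side sketch of the result-writing flow; the task type, the payload shape, and the assumption that the task was enqueued with a `Retention` option are illustrative:

```go
package main

import (
	"context"
	"encoding/json"

	"github.com/hibiken/asynq"
)

// Sketch of a Handler that records its output as the task result.
func handleImageResize(ctx context.Context, t *asynq.Task) error {
	// ... resize the image, producing a thumbnail URL ...
	res, err := json.Marshal(map[string]string{"thumbnail_url": "other/blobstore/url"})
	if err != nil {
		return err
	}
	// ResultWriter is non-nil only for tasks delivered to Handler.ProcessTask;
	// the written data surfaces as TaskInfo.Result while the task is retained.
	if w := t.ResultWriter(); w != nil {
		if _, err := w.Write(res); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	mux := asynq.NewServeMux()
	mux.HandleFunc("image:resize", handleImageResize)
}
```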

@@ -7,9 +7,10 @@ package asynq
 import (
     "flag"
     "sort"
+    "strings"
     "testing"
 
-    "github.com/go-redis/redis/v7"
+    "github.com/go-redis/redis/v8"
     "github.com/google/go-cmp/cmp"
     h "github.com/hibiken/asynq/internal/asynqtest"
     "github.com/hibiken/asynq/internal/log"
@@ -24,6 +25,9 @@ var (
     redisAddr string
     redisDB   int
 
+    useRedisCluster   bool
+    redisClusterAddrs string // comma-separated list of host:port
+
     testLogLevel = FatalLevel
 )
@@ -32,27 +36,56 @@ var testLogger *log.Logger
 func init() {
     flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing")
     flag.IntVar(&redisDB, "redis_db", 14, "redis db number to use in testing")
+    flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing")
+    flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses")
     flag.Var(&testLogLevel, "loglevel", "log level to use in testing")
 
     testLogger = log.NewLogger(nil)
     testLogger.SetLevel(toInternalLogLevel(testLogLevel))
 }
 
-func setup(tb testing.TB) *redis.Client {
+func setup(tb testing.TB) (r redis.UniversalClient) {
     tb.Helper()
-    r := redis.NewClient(&redis.Options{
-        Addr: redisAddr,
-        DB:   redisDB,
-    })
+    if useRedisCluster {
+        addrs := strings.Split(redisClusterAddrs, ",")
+        if len(addrs) == 0 {
+            tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
+        }
+        r = redis.NewClusterClient(&redis.ClusterOptions{
+            Addrs: addrs,
+        })
+    } else {
+        r = redis.NewClient(&redis.Options{
+            Addr: redisAddr,
+            DB:   redisDB,
+        })
+    }
     // Start each test with a clean slate.
     h.FlushDB(tb, r)
     return r
 }
 
+func getRedisConnOpt(tb testing.TB) RedisConnOpt {
+    tb.Helper()
+    if useRedisCluster {
+        addrs := strings.Split(redisClusterAddrs, ",")
+        if len(addrs) == 0 {
+            tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
+        }
+        return RedisClusterClientOpt{
+            Addrs: addrs,
+        }
+    }
+    return RedisClientOpt{
+        Addr: redisAddr,
+        DB:   redisDB,
+    }
+}
+
 var sortTaskOpt = cmp.Transformer("SortMsg", func(in []*Task) []*Task {
     out := append([]*Task(nil), in...) // Copy input to avoid mutating it
     sort.Slice(out, func(i, j int) bool {
-        return out[i].Type < out[j].Type
+        return out[i].Type() < out[j].Type()
     })
     return out
 })

@@ -6,37 +6,46 @@ package asynq
 
 import (
     "context"
+    "encoding/json"
     "fmt"
-    "math/rand"
     "sync"
     "testing"
     "time"
+
+    h "github.com/hibiken/asynq/internal/asynqtest"
 )
 
+// Creates a new task of type "task<n>" with payload {"data": n}.
+func makeTask(n int) *Task {
+    b, err := json.Marshal(map[string]int{"data": n})
+    if err != nil {
+        panic(err)
+    }
+    return NewTask(fmt.Sprintf("task%d", n), b)
+}
+
 // Simple E2E Benchmark testing with no scheduled tasks and retries.
 func BenchmarkEndToEndSimple(b *testing.B) {
     const count = 100000
     for n := 0; n < b.N; n++ {
         b.StopTimer() // begin setup
         setup(b)
-        redis := &RedisClientOpt{
-            Addr: redisAddr,
-            DB:   redisDB,
-        }
+        redis := getRedisConnOpt(b)
         client := NewClient(redis)
         srv := NewServer(redis, Config{
             Concurrency: 10,
             RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
                 return time.Second
             },
+            LogLevel: testLogLevel,
         })
         // Create a bunch of tasks
         for i := 0; i < count; i++ {
-            t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-            if err := client.Enqueue(t); err != nil {
+            if _, err := client.Enqueue(makeTask(i)); err != nil {
                 b.Fatalf("could not enqueue a task: %v", err)
            }
         }
+        client.Close()
 
         var wg sync.WaitGroup
         wg.Add(count)
@@ -60,38 +69,47 @@ func BenchmarkEndToEnd(b *testing.B) {
     const count = 100000
     for n := 0; n < b.N; n++ {
         b.StopTimer() // begin setup
-        rand.Seed(time.Now().UnixNano())
         setup(b)
-        redis := &RedisClientOpt{
-            Addr: redisAddr,
-            DB:   redisDB,
-        }
+        redis := getRedisConnOpt(b)
         client := NewClient(redis)
         srv := NewServer(redis, Config{
             Concurrency: 10,
             RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
                 return time.Second
             },
+            LogLevel: testLogLevel,
         })
         // Create a bunch of tasks
         for i := 0; i < count; i++ {
-            t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-            if err := client.Enqueue(t); err != nil {
+            if _, err := client.Enqueue(makeTask(i)); err != nil {
                 b.Fatalf("could not enqueue a task: %v", err)
             }
         }
         for i := 0; i < count; i++ {
-            t := NewTask(fmt.Sprintf("scheduled%d", i), map[string]interface{}{"data": i})
-            if err := client.EnqueueAt(time.Now().Add(time.Second), t); err != nil {
+            if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil {
                 b.Fatalf("could not enqueue a task: %v", err)
             }
         }
+        client.Close()
 
         var wg sync.WaitGroup
         wg.Add(count * 2)
         handler := func(ctx context.Context, t *Task) error {
-            // randomly fail 1% of tasks
-            if rand.Intn(100) == 1 {
+            var p map[string]int
+            if err := json.Unmarshal(t.Payload(), &p); err != nil {
+                b.Logf("internal error: %v", err)
+            }
+            n, ok := p["data"]
+            if !ok {
+                n = 1
+                b.Logf("internal error: could not get data from payload")
+            }
+            retried, ok := GetRetryCount(ctx)
+            if !ok {
+                b.Logf("internal error: could not get retry count from context")
+            }
+            // Fail 1% of tasks for the first attempt.
+            if retried == 0 && n%100 == 0 {
                 return fmt.Errorf(":(")
             }
             wg.Done()
@@ -119,10 +137,7 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
     for n := 0; n < b.N; n++ {
         b.StopTimer() // begin setup
         setup(b)
-        redis := &RedisClientOpt{
-            Addr: redisAddr,
-            DB:   redisDB,
-        }
+        redis := getRedisConnOpt(b)
         client := NewClient(redis)
         srv := NewServer(redis, Config{
             Concurrency: 10,
@@ -131,26 +146,25 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
             "default": 3,
             "low":     1,
         },
+        LogLevel: testLogLevel,
     })
     // Create a bunch of tasks
     for i := 0; i < highCount; i++ {
-        t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-        if err := client.Enqueue(t, Queue("high")); err != nil {
+        if _, err := client.Enqueue(makeTask(i), Queue("high")); err != nil {
             b.Fatalf("could not enqueue a task: %v", err)
         }
     }
     for i := 0; i < defaultCount; i++ {
-        t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-        if err := client.Enqueue(t); err != nil {
+        if _, err := client.Enqueue(makeTask(i)); err != nil {
             b.Fatalf("could not enqueue a task: %v", err)
         }
     }
     for i := 0; i < lowCount; i++ {
-        t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
-        if err := client.Enqueue(t, Queue("low")); err != nil {
+        if _, err := client.Enqueue(makeTask(i), Queue("low")); err != nil {
             b.Fatalf("could not enqueue a task: %v", err)
         }
     }
+    client.Close()
 
     var wg sync.WaitGroup
     wg.Add(highCount + defaultCount + lowCount)
@@ -168,3 +182,58 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
         b.StartTimer() // end teardown
     }
 }
+
+// E2E benchmark to check client enqueue operation performs correctly,
+// while server is busy processing tasks.
+func BenchmarkClientWhileServerRunning(b *testing.B) {
+    const count = 10000
+    for n := 0; n < b.N; n++ {
+        b.StopTimer() // begin setup
+        setup(b)
+        redis := getRedisConnOpt(b)
+        client := NewClient(redis)
+        srv := NewServer(redis, Config{
+            Concurrency: 10,
+            RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
+                return time.Second
+            },
+            LogLevel: testLogLevel,
+        })
+        // Enqueue 10,000 tasks.
+        for i := 0; i < count; i++ {
+            if _, err := client.Enqueue(makeTask(i)); err != nil {
+                b.Fatalf("could not enqueue a task: %v", err)
+            }
+        }
+        // Schedule 10,000 tasks.
+        for i := 0; i < count; i++ {
+            if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil {
+                b.Fatalf("could not enqueue a task: %v", err)
+            }
+        }
+
+        handler := func(ctx context.Context, t *Task) error {
+            return nil
+        }
+        srv.Start(HandlerFunc(handler))
+
+        b.StartTimer() // end setup
+
+        b.Log("Starting enqueueing")
+        enqueued := 0
+        for enqueued < 100000 {
+            t := NewTask(fmt.Sprintf("enqueued%d", enqueued), h.JSON(map[string]interface{}{"data": enqueued}))
+            if _, err := client.Enqueue(t); err != nil {
+                b.Logf("could not enqueue task %d: %v", enqueued, err)
+                continue
+            }
+            enqueued++
+        }
+        b.Logf("Finished enqueueing %d tasks", enqueued)
+
+        b.StopTimer() // begin teardown
+        srv.Stop()
+        client.Close()
+        b.StartTimer() // end teardown
+    }
+}

client.go (370 changed lines)
@@ -5,16 +5,16 @@
 package asynq
 
 import (
-    "errors"
+    "context"
     "fmt"
-    "sort"
     "strings"
-    "sync"
     "time"
 
+    "github.com/go-redis/redis/v8"
+    "github.com/google/uuid"
     "github.com/hibiken/asynq/internal/base"
+    "github.com/hibiken/asynq/internal/errors"
     "github.com/hibiken/asynq/internal/rdb"
-    "github.com/rs/xid"
 )
 
 // A Client is responsible for scheduling tasks.
@@ -24,30 +24,55 @@ import (
 //
 // Clients are safe for concurrent use by multiple goroutines.
 type Client struct {
-    mu   sync.Mutex
-    opts map[string][]Option
     rdb *rdb.RDB
 }
 
-// NewClient and returns a new Client given a redis connection option.
+// NewClient returns a new Client instance given a redis connection option.
 func NewClient(r RedisConnOpt) *Client {
-    rdb := rdb.NewRDB(createRedisClient(r))
-    return &Client{
-        opts: make(map[string][]Option),
-        rdb:  rdb,
-    }
+    c, ok := r.MakeRedisClient().(redis.UniversalClient)
+    if !ok {
+        panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
+    }
+    return &Client{rdb: rdb.NewRDB(c)}
 }
 
+type OptionType int
+
+const (
+    MaxRetryOpt OptionType = iota
+    QueueOpt
+    TimeoutOpt
+    DeadlineOpt
+    UniqueOpt
+    ProcessAtOpt
+    ProcessInOpt
+    TaskIDOpt
+    RetentionOpt
+)
+
 // Option specifies the task processing behavior.
-type Option interface{}
+type Option interface {
+    // String returns a string representation of the option.
+    String() string
+
+    // Type describes the type of the option.
+    Type() OptionType
+
+    // Value returns a value used to create this option.
+    Value() interface{}
+}
 
 // Internal option representations.
 type (
     retryOption     int
     queueOption     string
+    taskIDOption    string
     timeoutOption   time.Duration
     deadlineOption  time.Time
     uniqueOption    time.Duration
+    processAtOption time.Time
+    processInOption time.Duration
+    retentionOption time.Duration
 )
 
 // MaxRetry returns an option to specify the max number of times
@@ -61,29 +86,65 @@ func MaxRetry(n int) Option {
     return retryOption(n)
 }
 
+func (n retryOption) String() string     { return fmt.Sprintf("MaxRetry(%d)", int(n)) }
+func (n retryOption) Type() OptionType   { return MaxRetryOpt }
+func (n retryOption) Value() interface{} { return int(n) }
+
 // Queue returns an option to specify the queue to enqueue the task into.
-//
-// Queue name is case-insensitive and the lowercased version is used.
-func Queue(name string) Option {
-    return queueOption(strings.ToLower(name))
+func Queue(qname string) Option {
+    return queueOption(qname)
 }
 
+func (qname queueOption) String() string     { return fmt.Sprintf("Queue(%q)", string(qname)) }
+func (qname queueOption) Type() OptionType   { return QueueOpt }
+func (qname queueOption) Value() interface{} { return string(qname) }
+
+// TaskID returns an option to specify the task ID.
+func TaskID(id string) Option {
+    return taskIDOption(id)
+}
+
+func (id taskIDOption) String() string     { return fmt.Sprintf("TaskID(%q)", string(id)) }
+func (id taskIDOption) Type() OptionType   { return TaskIDOpt }
+func (id taskIDOption) Value() interface{} { return string(id) }
+
 // Timeout returns an option to specify how long a task may run.
+// If the timeout elapses before the Handler returns, then the task
+// will be retried.
 //
 // Zero duration means no limit.
+//
+// If there's a conflicting Deadline option, whichever comes earliest
+// will be used.
 func Timeout(d time.Duration) Option {
     return timeoutOption(d)
 }
 
+func (d timeoutOption) String() string     { return fmt.Sprintf("Timeout(%v)", time.Duration(d)) }
+func (d timeoutOption) Type() OptionType   { return TimeoutOpt }
+func (d timeoutOption) Value() interface{} { return time.Duration(d) }
+
 // Deadline returns an option to specify the deadline for the given task.
+// If it reaches the deadline before the Handler returns, then the task
+// will be retried.
+//
+// If there's a conflicting Timeout option, whichever comes earliest
+// will be used.
 func Deadline(t time.Time) Option {
     return deadlineOption(t)
 }
 
+func (t deadlineOption) String() string {
+    return fmt.Sprintf("Deadline(%v)", time.Time(t).Format(time.UnixDate))
+}
+func (t deadlineOption) Type() OptionType   { return DeadlineOpt }
+func (t deadlineOption) Value() interface{} { return time.Time(t) }
+
 // Unique returns an option to enqueue a task only if the given task is unique.
 // Task enqueued with this option is guaranteed to be unique within the given ttl.
 // Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued.
 // ErrDuplicateTask error is returned when enqueueing a duplicate task.
+// TTL duration must be greater than or equal to 1 second.
 //
 // Uniqueness of a task is based on the following properties:
 //     - Task Type
@@ -93,167 +154,242 @@ func Unique(ttl time.Duration) Option {
     return uniqueOption(ttl)
 }
 
+func (ttl uniqueOption) String() string     { return fmt.Sprintf("Unique(%v)", time.Duration(ttl)) }
+func (ttl uniqueOption) Type() OptionType   { return UniqueOpt }
+func (ttl uniqueOption) Value() interface{} { return time.Duration(ttl) }
+
+// ProcessAt returns an option to specify when to process the given task.
+//
+// If there's a conflicting ProcessIn option, the last option passed to Enqueue overrides the others.
+func ProcessAt(t time.Time) Option {
+    return processAtOption(t)
+}
+
+func (t processAtOption) String() string {
+    return fmt.Sprintf("ProcessAt(%v)", time.Time(t).Format(time.UnixDate))
+}
+func (t processAtOption) Type() OptionType   { return ProcessAtOpt }
+func (t processAtOption) Value() interface{} { return time.Time(t) }
+
+// ProcessIn returns an option to specify when to process the given task relative to the current time.
+//
+// If there's a conflicting ProcessAt option, the last option passed to Enqueue overrides the others.
+func ProcessIn(d time.Duration) Option {
+    return processInOption(d)
+}
+
+func (d processInOption) String() string     { return fmt.Sprintf("ProcessIn(%v)", time.Duration(d)) }
+func (d processInOption) Type() OptionType   { return ProcessInOpt }
+func (d processInOption) Value() interface{} { return time.Duration(d) }
+
+// Retention returns an option to specify the duration of retention period for the task.
+// If this option is provided, the task will be stored as a completed task after successful processing.
+// A completed task will be deleted after the specified duration elapses.
+func Retention(d time.Duration) Option {
+    return retentionOption(d)
+}
+
+func (ttl retentionOption) String() string     { return fmt.Sprintf("Retention(%v)", time.Duration(ttl)) }
+func (ttl retentionOption) Type() OptionType   { return RetentionOpt }
+func (ttl retentionOption) Value() interface{} { return time.Duration(ttl) }
+
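Taken together, these options compose at enqueue time, with later options overriding earlier ones. A speculative example mixing routing, scheduling, retries, retention, and an explicit ID (all values below are placeholders):

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("report:generate", []byte(`{"month":"2021-12"}`))
	info, err := client.Enqueue(task,
		asynq.Queue("low"),              // route to the "low" priority queue
		asynq.ProcessIn(15*time.Minute), // run 15 minutes from now
		asynq.MaxRetry(3),               // retry at most 3 times
		asynq.Retention(2*time.Hour),    // keep the completed task for 2h
		asynq.TaskID("report-2021-12"),  // explicit, caller-chosen ID
	)
	if err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	log.Printf("scheduled: id=%s next=%v", info.ID, info.NextProcessAt)
}
```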
 // ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
 //
 // ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
 var ErrDuplicateTask = errors.New("task already exists")
 
+// ErrTaskIDConflict indicates that the given task could not be enqueued since its task ID already exists.
+//
+// ErrTaskIDConflict error only applies to tasks enqueued with a TaskID option.
+var ErrTaskIDConflict = errors.New("task ID conflicts with another task")
+
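Callers are expected to branch on these sentinel errors. A sketch of distinguishing the two rejection cases, assuming the returned error supports `errors.Is` matching (task type, payload, and ID are placeholders):

```go
package main

import (
	"errors"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("report:generate", []byte(`{"month":"2021-12"}`))
	_, err := client.Enqueue(task,
		asynq.Unique(24*time.Hour),     // reject payload-identical duplicates for 24h
		asynq.TaskID("report-2021-12"), // reject a second task with the same ID
	)
	switch {
	case errors.Is(err, asynq.ErrDuplicateTask):
		log.Println("identical task already enqueued; skipping")
	case errors.Is(err, asynq.ErrTaskIDConflict):
		log.Println("task ID already in use; pick another ID")
	case err != nil:
		log.Fatalf("enqueue failed: %v", err)
	}
}
```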
 type option struct {
     retry     int
     queue     string
+    taskID    string
     timeout   time.Duration
     deadline  time.Time
     uniqueTTL time.Duration
+    processAt time.Time
+    retention time.Duration
 }
 
-func composeOptions(opts ...Option) option {
+// composeOptions merges user provided options into the default options
+// and returns the composed option.
+// It also validates the user provided options and returns an error if any of
+// the user provided options fail the validations.
+func composeOptions(opts ...Option) (option, error) {
     res := option{
         retry:     defaultMaxRetry,
         queue:     base.DefaultQueueName,
-        timeout:   0,
+        taskID:    uuid.NewString(),
+        timeout:   0, // do not set to defaultTimeout here
         deadline:  time.Time{},
+        processAt: time.Now(),
     }
     for _, opt := range opts {
         switch opt := opt.(type) {
         case retryOption:
             res.retry = int(opt)
         case queueOption:
-            res.queue = string(opt)
+            qname := string(opt)
+            if err := base.ValidateQueueName(qname); err != nil {
+                return option{}, err
+            }
+            res.queue = qname
+        case taskIDOption:
+            id := string(opt)
+            if err := validateTaskID(id); err != nil {
+                return option{}, err
+            }
+            res.taskID = id
         case timeoutOption:
             res.timeout = time.Duration(opt)
         case deadlineOption:
             res.deadline = time.Time(opt)
         case uniqueOption:
-            res.uniqueTTL = time.Duration(opt)
+            ttl := time.Duration(opt)
+            if ttl < 1*time.Second {
+                return option{}, errors.New("Unique TTL cannot be less than 1s")
+            }
+            res.uniqueTTL = ttl
+        case processAtOption:
+            res.processAt = time.Time(opt)
+        case processInOption:
+            res.processAt = time.Now().Add(time.Duration(opt))
+        case retentionOption:
+            res.retention = time.Duration(opt)
         default:
             // ignore unexpected option
         }
     }
-    return res
+    return res, nil
 }
 
-// uniqueKey computes the redis key used for the given task.
-// It returns an empty string if ttl is zero.
-func uniqueKey(t *Task, ttl time.Duration, qname string) string {
-    if ttl == 0 {
-        return ""
-    }
-    return fmt.Sprintf("%s:%s:%s", t.Type, serializePayload(t.Payload.data), qname)
-}
-
-func serializePayload(payload map[string]interface{}) string {
-    if payload == nil {
-        return "nil"
-    }
-    type entry struct {
-        k string
-        v interface{}
-    }
-    var es []entry
-    for k, v := range payload {
-        es = append(es, entry{k, v})
-    }
-    // sort entries by key
-    sort.Slice(es, func(i, j int) bool { return es[i].k < es[j].k })
-    var b strings.Builder
-    for _, e := range es {
-        if b.Len() > 0 {
-            b.WriteString(",")
-        }
-        b.WriteString(fmt.Sprintf("%s=%v", e.k, e.v))
-    }
-    return b.String()
-}
+// validates user provided task ID string.
+func validateTaskID(id string) error {
+    if strings.TrimSpace(id) == "" {
+        return errors.New("task ID cannot be empty")
+    }
+    return nil
+}
 
+const (
     // Default max retry count used if nothing is specified.
-const defaultMaxRetry = 25
+    defaultMaxRetry = 25
 
-// SetDefaultOptions sets options to be used for a given task type.
-// The argument opts specifies the behavior of task processing.
-// If there are conflicting Option values the last one overrides others.
-//
-// Default options can be overridden by options passed at enqueue time.
-func (c *Client) SetDefaultOptions(taskType string, opts ...Option) {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    c.opts[taskType] = opts
-}
+    // Default timeout used if both timeout and deadline are not specified.
+    defaultTimeout = 30 * time.Minute
+)
 
-// EnqueueAt schedules task to be enqueued at the specified time.
-//
-// EnqueueAt returns nil if the task is scheduled successfully, otherwise returns a non-nil error.
-//
-// The argument opts specifies the behavior of task processing.
-// If there are conflicting Option values the last one overrides others.
-func (c *Client) EnqueueAt(t time.Time, task *Task, opts ...Option) error {
-    return c.enqueueAt(t, task, opts...)
-}
+// Value zero indicates no timeout and no deadline.
+var (
+    noTimeout  time.Duration = 0
+    noDeadline time.Time     = time.Unix(0, 0)
+)
 
-// Enqueue enqueues task to be processed immediately.
-//
-// Enqueue returns nil if the task is enqueued successfully, otherwise returns a non-nil error.
-//
-// The argument opts specifies the behavior of task processing.
-// If there are conflicting Option values the last one overrides others.
-func (c *Client) Enqueue(task *Task, opts ...Option) error {
-    return c.enqueueAt(time.Now(), task, opts...)
-}
-
-// EnqueueIn schedules task to be enqueued after the specified delay.
-//
-// EnqueueIn returns nil if the task is scheduled successfully, otherwise returns a non-nil error.
-//
-// The argument opts specifies the behavior of task processing.
-// If there are conflicting Option values the last one overrides others.
-func (c *Client) EnqueueIn(d time.Duration, task *Task, opts ...Option) error {
-    return c.enqueueAt(time.Now().Add(d), task, opts...)
-}
-
-// Close closes the connection with redis server.
+// Close closes the connection with redis.
 func (c *Client) Close() error {
     return c.rdb.Close()
 }
 
-func (c *Client) enqueueAt(t time.Time, task *Task, opts ...Option) error {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    if defaults, ok := c.opts[task.Type]; ok {
-        opts = append(defaults, opts...)
-    }
-    opt := composeOptions(opts...)
+// Enqueue enqueues the given task to a queue.
+//
+// Enqueue returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
+//
+// The argument opts specifies the behavior of task processing.
+// If there are conflicting Option values the last one overrides others.
+// Any options provided to NewTask can be overridden by options passed to Enqueue.
+// By default, max retry is set to 25 and timeout is set to 30 minutes.
+//
+// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
+//
+// Enqueue uses context.Background internally; to specify the context, use EnqueueContext.
+func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
+    return c.EnqueueContext(context.Background(), task, opts...)
+}
+
+// EnqueueContext enqueues the given task to a queue.
+//
+// EnqueueContext returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
+//
+// The argument opts specifies the behavior of task processing.
+// If there are conflicting Option values the last one overrides others.
+// Any options provided to NewTask can be overridden by options passed to Enqueue.
+// By default, max retry is set to 25 and timeout is set to 30 minutes.
+//
+// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
+//
+// The first argument context applies to the enqueue operation. To specify task timeout and deadline, use Timeout and Deadline option instead.
+func (c *Client) EnqueueContext(ctx context.Context, task *Task, opts ...Option) (*TaskInfo, error) {
+    if strings.TrimSpace(task.Type()) == "" {
+        return nil, fmt.Errorf("task typename cannot be empty")
+    }
+    // merge task options with the options provided at enqueue time.
+    opts = append(task.opts, opts...)
+    opt, err := composeOptions(opts...)
+    if err != nil {
+        return nil, err
+    }
+    deadline := noDeadline
+    if !opt.deadline.IsZero() {
+        deadline = opt.deadline
+    }
+    timeout := noTimeout
+    if opt.timeout != 0 {
+        timeout = opt.timeout
+    }
+    if deadline.Equal(noDeadline) && timeout == noTimeout {
+        // If neither deadline nor timeout are set, use default timeout.
+        timeout = defaultTimeout
+    }
+    var uniqueKey string
+    if opt.uniqueTTL > 0 {
+        uniqueKey = base.UniqueKey(opt.queue, task.Type(), task.Payload())
+    }
     msg := &base.TaskMessage{
-        ID:        xid.New(),
-        Type:      task.Type,
-        Payload:   task.Payload.data,
+        ID:        opt.taskID,
+        Type:      task.Type(),
+        Payload:   task.Payload(),
         Queue:     opt.queue,
         Retry:     opt.retry,
-        Timeout:   opt.timeout.String(),
-        Deadline:  opt.deadline.Format(time.RFC3339),
-        UniqueKey: uniqueKey(task, opt.uniqueTTL, opt.queue),
+        Deadline:  deadline.Unix(),
+        Timeout:   int64(timeout.Seconds()),
+        UniqueKey: uniqueKey,
+        Retention: int64(opt.retention.Seconds()),
     }
-    var err error
-    if time.Now().After(t) {
-        err = c.enqueue(msg, opt.uniqueTTL)
+    now := time.Now()
+    var state base.TaskState
+    if opt.processAt.Before(now) || opt.processAt.Equal(now) {
+        opt.processAt = now
+        err = c.enqueue(ctx, msg, opt.uniqueTTL)
+        state = base.TaskStatePending
     } else {
-        err = c.schedule(msg, t, opt.uniqueTTL)
|
err = c.schedule(ctx, msg, opt.processAt, opt.uniqueTTL)
|
||||||
|
state = base.TaskStateScheduled
|
||||||
}
|
}
|
||||||
if err == rdb.ErrDuplicateTask {
|
switch {
|
||||||
return fmt.Errorf("%w", ErrDuplicateTask)
|
case errors.Is(err, errors.ErrDuplicateTask):
|
||||||
|
return nil, fmt.Errorf("%w", ErrDuplicateTask)
|
||||||
|
case errors.Is(err, errors.ErrTaskIdConflict):
|
||||||
|
return nil, fmt.Errorf("%w", ErrTaskIDConflict)
|
||||||
|
case err != nil:
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
return err
|
return newTaskInfo(msg, state, opt.processAt, nil), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) enqueue(msg *base.TaskMessage, uniqueTTL time.Duration) error {
|
func (c *Client) enqueue(ctx context.Context, msg *base.TaskMessage, uniqueTTL time.Duration) error {
|
||||||
if uniqueTTL > 0 {
|
if uniqueTTL > 0 {
|
||||||
return c.rdb.EnqueueUnique(msg, uniqueTTL)
|
return c.rdb.EnqueueUnique(ctx, msg, uniqueTTL)
|
||||||
}
|
}
|
||||||
return c.rdb.Enqueue(msg)
|
return c.rdb.Enqueue(ctx, msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) schedule(msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
|
func (c *Client) schedule(ctx context.Context, msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
|
||||||
if uniqueTTL > 0 {
|
if uniqueTTL > 0 {
|
||||||
ttl := t.Add(uniqueTTL).Sub(time.Now())
|
ttl := t.Add(uniqueTTL).Sub(time.Now())
|
||||||
return c.rdb.ScheduleUnique(msg, t, ttl)
|
return c.rdb.ScheduleUnique(ctx, msg, t, ttl)
|
||||||
}
|
}
|
||||||
return c.rdb.Schedule(msg, t)
|
return c.rdb.Schedule(ctx, msg, t)
|
||||||
}
|
}
|
||||||
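For orientation, here is a minimal sketch of calling the reworked enqueue API above; the task type, payload, and queue name are illustrative assumptions, not values from the diff (imports of "log", "time", and "github.com/hibiken/asynq" are assumed):

	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("email:welcome", []byte(`{"user_id":42}`))

	// Options given at enqueue time override options attached to the task;
	// the last conflicting Option wins.
	info, err := client.Enqueue(task,
		asynq.Queue("critical"),
		asynq.MaxRetry(10),
		asynq.Timeout(5*time.Minute),
		asynq.ProcessIn(30*time.Second), // task stays scheduled until then
	)
	if err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	log.Printf("enqueued task id=%s queue=%s", info.ID, info.Queue)

Because ProcessIn is set, Enqueue returns a TaskInfo in the scheduled state; without it the task would be pending immediately, matching the branch on opt.processAt in the code above.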
|
|||||||
843
client_test.go
843
client_test.go
File diff suppressed because it is too large
Load Diff
63	context.go
@@ -6,58 +6,16 @@ package asynq

 import (
 	"context"
-	"time"

-	"github.com/hibiken/asynq/internal/base"
+	asynqcontext "github.com/hibiken/asynq/internal/context"
 )

-// A taskMetadata holds task scoped data to put in context.
-type taskMetadata struct {
-	id         string
-	maxRetry   int
-	retryCount int
-}
-
-// ctxKey type is unexported to prevent collisions with context keys defined in
-// other packages.
-type ctxKey int
-
-// metadataCtxKey is the context key for the task metadata.
-// Its value of zero is arbitrary.
-const metadataCtxKey ctxKey = 0
-
-// createContext returns a context and cancel function for a given task message.
-func createContext(msg *base.TaskMessage) (ctx context.Context, cancel context.CancelFunc) {
-	metadata := taskMetadata{
-		id:         msg.ID.String(),
-		maxRetry:   msg.Retry,
-		retryCount: msg.Retried,
-	}
-	ctx = context.WithValue(context.Background(), metadataCtxKey, metadata)
-	timeout, err := time.ParseDuration(msg.Timeout)
-	if err == nil && timeout != 0 {
-		ctx, cancel = context.WithTimeout(ctx, timeout)
-	}
-	deadline, err := time.Parse(time.RFC3339, msg.Deadline)
-	if err == nil && !deadline.IsZero() {
-		ctx, cancel = context.WithDeadline(ctx, deadline)
-	}
-	if cancel == nil {
-		ctx, cancel = context.WithCancel(ctx)
-	}
-	return ctx, cancel
-}
-
 // GetTaskID extracts a task ID from a context, if any.
 //
 // ID of a task is guaranteed to be unique.
 // ID of a task doesn't change if the task is being retried.
 func GetTaskID(ctx context.Context) (id string, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return "", false
-	}
-	return metadata.id, true
+	return asynqcontext.GetTaskID(ctx)
 }

 // GetRetryCount extracts retry count from a context, if any.
@@ -65,11 +23,7 @@ func GetTaskID(ctx context.Context) (id string, ok bool) {
 // Return value n indicates the number of times associated task has been
 // retried so far.
 func GetRetryCount(ctx context.Context) (n int, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return 0, false
-	}
-	return metadata.retryCount, true
+	return asynqcontext.GetRetryCount(ctx)
 }

 // GetMaxRetry extracts maximum retry from a context, if any.
@@ -77,9 +31,12 @@ func GetRetryCount(ctx context.Context) (n int, ok bool) {
 // Return value n indicates the maximum number of times the associated task
 // can be retried if ProcessTask returns a non-nil error.
 func GetMaxRetry(ctx context.Context) (n int, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return 0, false
-	}
-	return metadata.maxRetry, true
+	return asynqcontext.GetMaxRetry(ctx)
+}
+
+// GetQueueName extracts queue name from a context, if any.
+//
+// Return value qname indicates which queue the task was pulled from.
+func GetQueueName(ctx context.Context) (qname string, ok bool) {
+	return asynqcontext.GetQueueName(ctx)
 }
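As a quick illustration of the accessors above, a handler can read task metadata from its context like this (a sketch; imports of "context", "log", and "github.com/hibiken/asynq" are assumed, and the handler body is hypothetical):

	func myHandler(ctx context.Context, task *asynq.Task) error {
		if id, ok := asynq.GetTaskID(ctx); ok {
			log.Printf("processing task %s", id)
		}
		retried, _ := asynq.GetRetryCount(ctx)
		maxRetry, _ := asynq.GetMaxRetry(ctx)
		if retried >= maxRetry-1 {
			log.Printf("this is the final retry attempt")
		}
		if qname, ok := asynq.GetQueueName(ctx); ok {
			log.Printf("task was pulled from queue %q", qname)
		}
		return nil
	}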
157	context_test.go (deleted)
@@ -1,157 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package asynq
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	"github.com/google/go-cmp/cmp"
-	"github.com/google/go-cmp/cmp/cmpopts"
-	"github.com/hibiken/asynq/internal/base"
-	"github.com/rs/xid"
-)
-
-func TestCreateContextWithTimeRestrictions(t *testing.T) {
-	var (
-		noTimeout  = time.Duration(0)
-		noDeadline = time.Time{}
-	)
-
-	tests := []struct {
-		desc         string
-		timeout      time.Duration
-		deadline     time.Time
-		wantDeadline time.Time
-	}{
-		{"only with timeout", 10 * time.Second, noDeadline, time.Now().Add(10 * time.Second)},
-		{"only with deadline", noTimeout, time.Now().Add(time.Hour), time.Now().Add(time.Hour)},
-		{"with timeout and deadline (timeout < deadline)", 10 * time.Second, time.Now().Add(time.Hour), time.Now().Add(10 * time.Second)},
-		{"with timeout and deadline (timeout > deadline)", 10 * time.Minute, time.Now().Add(30 * time.Second), time.Now().Add(30 * time.Second)},
-	}
-
-	for _, tc := range tests {
-		msg := &base.TaskMessage{
-			Type:     "something",
-			ID:       xid.New(),
-			Timeout:  tc.timeout.String(),
-			Deadline: tc.deadline.Format(time.RFC3339),
-		}
-
-		ctx, cancel := createContext(msg)
-
-		select {
-		case x := <-ctx.Done():
-			t.Errorf("%s: <-ctx.Done() == %v, want nothing (it should block)", tc.desc, x)
-		default:
-		}
-
-		got, ok := ctx.Deadline()
-		if !ok {
-			t.Errorf("%s: ctx.Deadline() returned false, want deadline to be set", tc.desc)
-		}
-		if !cmp.Equal(tc.wantDeadline, got, cmpopts.EquateApproxTime(time.Second)) {
-			t.Errorf("%s: ctx.Deadline() returned %v, want %v", tc.desc, got, tc.wantDeadline)
-		}
-
-		cancel()
-
-		select {
-		case <-ctx.Done():
-		default:
-			t.Errorf("ctx.Done() blocked, want it to be non-blocking")
-		}
-	}
-}
-
-func TestCreateContextWithoutTimeRestrictions(t *testing.T) {
-	msg := &base.TaskMessage{
-		Type:     "something",
-		ID:       xid.New(),
-		Timeout:  time.Duration(0).String(),        // zero value to indicate no timeout
-		Deadline: time.Time{}.Format(time.RFC3339), // zero value to indicate no deadline
-	}
-
-	ctx, cancel := createContext(msg)
-
-	select {
-	case x := <-ctx.Done():
-		t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
-	default:
-	}
-
-	_, ok := ctx.Deadline()
-	if ok {
-		t.Error("ctx.Deadline() returned true, want deadline to not be set")
-	}
-
-	cancel()
-
-	select {
-	case <-ctx.Done():
-	default:
-		t.Error("ctx.Done() blocked, want it to be non-blocking")
-	}
-}
-
-func TestGetTaskMetadataFromContext(t *testing.T) {
-	tests := []struct {
-		desc string
-		msg  *base.TaskMessage
-	}{
-		{"with zero retried message", &base.TaskMessage{Type: "something", ID: xid.New(), Retry: 25, Retried: 0}},
-		{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: xid.New(), Retry: 10, Retried: 5}},
-	}
-
-	for _, tc := range tests {
-		ctx, _ := createContext(tc.msg)
-
-		id, ok := GetTaskID(ctx)
-		if !ok {
-			t.Errorf("%s: GetTaskID(ctx) returned ok == false", tc.desc)
-		}
-		if ok && id != tc.msg.ID.String() {
-			t.Errorf("%s: GetTaskID(ctx) returned id == %q, want %q", tc.desc, id, tc.msg.ID.String())
-		}
-
-		retried, ok := GetRetryCount(ctx)
-		if !ok {
-			t.Errorf("%s: GetRetryCount(ctx) returned ok == false", tc.desc)
-		}
-		if ok && retried != tc.msg.Retried {
-			t.Errorf("%s: GetRetryCount(ctx) returned n == %d want %d", tc.desc, retried, tc.msg.Retried)
-		}
-
-		maxRetry, ok := GetMaxRetry(ctx)
-		if !ok {
-			t.Errorf("%s: GetMaxRetry(ctx) returned ok == false", tc.desc)
-		}
-		if ok && maxRetry != tc.msg.Retry {
-			t.Errorf("%s: GetMaxRetry(ctx) returned n == %d want %d", tc.desc, maxRetry, tc.msg.Retry)
-		}
-	}
-}
-
-func TestGetTaskMetadataFromContextError(t *testing.T) {
-	tests := []struct {
-		desc string
-		ctx  context.Context
-	}{
-		{"with background context", context.Background()},
-	}
-
-	for _, tc := range tests {
-		if _, ok := GetTaskID(tc.ctx); ok {
-			t.Errorf("%s: GetTaskID(ctx) returned ok == true", tc.desc)
-		}
-		if _, ok := GetRetryCount(tc.ctx); ok {
-			t.Errorf("%s: GetRetryCount(ctx) returned ok == true", tc.desc)
-		}
-		if _, ok := GetMaxRetry(tc.ctx); ok {
-			t.Errorf("%s: GetMaxRetry(ctx) returned ok == true", tc.desc)
-		}
-	}
-}
49	doc.go
@@ -3,40 +3,46 @@
 // that can be found in the LICENSE file.

 /*
-Package asynq provides a framework for asynchronous task processing.
+Package asynq provides a framework for Redis based distributed task queue.

-Asynq uses Redis as a message broker. To connect to redis server,
-specify the options using one of RedisConnOpt types.
+Asynq uses Redis as a message broker. To connect to redis,
+specify the connection using one of RedisConnOpt types.

-	redis = &asynq.RedisClientOpt{
+	redisConnOpt = asynq.RedisClientOpt{
 		Addr:     "127.0.0.1:6379",
 		Password: "xxxxx",
-		DB:       3,
+		DB:       2,
 	}

-The Client is used to enqueue a task to be processed at the specified time.
+The Client is used to enqueue a task.

-Task is created with two parameters: its type and payload.
-
-	client := asynq.NewClient(redis)
+	client := asynq.NewClient(redisConnOpt)

-	t := asynq.NewTask(
-		"send_email",
-		map[string]interface{}{"user_id": 42})
+	// Task is created with two parameters: its type and payload.
+	// Payload data is simply an array of bytes. It can be encoded in JSON, Protocol Buffer, Gob, etc.
+	b, err := json.Marshal(ExamplePayload{UserID: 42})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	task := asynq.NewTask("example", b)

 	// Enqueue the task to be processed immediately.
-	err := client.Enqueue(t)
+	info, err := client.Enqueue(task)

 	// Schedule the task to be processed after one minute.
-	err = client.EnqueueIn(time.Minute, t)
+	info, err = client.Enqueue(task, asynq.ProcessIn(1*time.Minute))

-The Server is used to run the background task processing with a given
+The Server is used to run the task processing workers with a given
 handler.
-	srv := asynq.NewServer(redis, asynq.Config{
+	srv := asynq.NewServer(redisConnOpt, asynq.Config{
 		Concurrency: 10,
 	})

-	srv.Run(handler)
+	if err := srv.Run(handler); err != nil {
+		log.Fatal(err)
+	}

 Handler is an interface type with a method which
 takes a task and returns an error. Handler should return nil if
@@ -50,10 +56,13 @@ Example of a type that implements the Handler interface.

 	func (h *TaskHandler) ProcessTask(ctx context.Context, task *asynq.Task) error {
 		switch task.Type {
-		case "send_email":
-			id, err := task.Payload.GetInt("user_id")
-			// send email
-			//...
+		case "example":
+			var data ExamplePayload
+			if err := json.Unmarshal(task.Payload(), &data); err != nil {
+				return err
+			}
+			// perform task with the data
+
 		default:
 			return fmt.Errorf("unexpected task type %q", task.Type)
 		}
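In practice the handler passed to Run is often an asynq.ServeMux rather than a hand-rolled type switch; a sketch under the same assumptions as the godoc above (ExamplePayload and the "example" task type are placeholders):

	mux := asynq.NewServeMux()
	mux.HandleFunc("example", func(ctx context.Context, t *asynq.Task) error {
		var data ExamplePayload
		if err := json.Unmarshal(t.Payload(), &data); err != nil {
			return err
		}
		// perform task with the data
		return nil
	})

	if err := srv.Run(mux); err != nil {
		log.Fatal(err)
	}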
BIN	docs/assets/asynqmon-queues-view.png (new binary file, not shown; after: 279 KiB)
BIN	docs/assets/asynqmon-task-view.png (new binary file, not shown; after: 347 KiB)
BIN	docs/assets/cluster.png (new binary file, not shown; after: 60 KiB)
BIN	(binary file changed, not shown; before: 983 KiB, after: 329 KiB)
@@ -5,10 +5,12 @@
 package asynq_test

 import (
+	"context"
 	"fmt"
 	"log"
 	"os"
 	"os/signal"
+	"time"

 	"github.com/hibiken/asynq"
 	"golang.org/x/sys/unix"
@@ -29,7 +31,7 @@ func ExampleServer_Run() {
 	}
 }

-func ExampleServer_Stop() {
+func ExampleServer_Shutdown() {
 	srv := asynq.NewServer(
 		asynq.RedisClientOpt{Addr: ":6379"},
 		asynq.Config{Concurrency: 20},
@@ -46,10 +48,10 @@ func ExampleServer_Stop() {
 	signal.Notify(sigs, unix.SIGTERM, unix.SIGINT)
 	<-sigs // wait for termination signal

-	srv.Stop()
+	srv.Shutdown()
 }

-func ExampleServer_Quiet() {
+func ExampleServer_Stop() {
 	srv := asynq.NewServer(
 		asynq.RedisClientOpt{Addr: ":6379"},
 		asynq.Config{Concurrency: 20},
@@ -69,13 +71,32 @@ func ExampleServer_Quiet() {
 	for {
 		s := <-sigs
 		if s == unix.SIGTSTP {
-			srv.Quiet() // stop processing new tasks
+			srv.Stop() // stop processing new tasks
 			continue
 		}
-		break
+		break // received SIGTERM or SIGINT signal
 	}

-	srv.Stop()
+	srv.Shutdown()
+}
+
+func ExampleScheduler() {
+	scheduler := asynq.NewScheduler(
+		asynq.RedisClientOpt{Addr: ":6379"},
+		&asynq.SchedulerOpts{Location: time.Local},
+	)
+
+	if _, err := scheduler.Register("* * * * *", asynq.NewTask("task1", nil)); err != nil {
+		log.Fatal(err)
+	}
+	if _, err := scheduler.Register("@every 30s", asynq.NewTask("task2", nil)); err != nil {
+		log.Fatal(err)
+	}
+
+	// Run blocks and waits for os signal to terminate the program.
+	if err := scheduler.Run(); err != nil {
+		log.Fatal(err)
+	}
 }

 func ExampleParseRedisURI() {
@@ -93,3 +114,20 @@ func ExampleParseRedisURI() {
 	// localhost:6379
 	// 10
 }
+
+func ExampleResultWriter() {
+	// ResultWriter is only accessible in Handler.
+	h := func(ctx context.Context, task *asynq.Task) error {
+		// .. do task processing work
+
+		res := []byte("task result data")
+		n, err := task.ResultWriter().Write(res) // implements io.Writer
+		if err != nil {
+			return fmt.Errorf("failed to write task result: %v", err)
+		}
+		log.Printf(" %d bytes written", n)
+		return nil
+	}
+
+	_ = h
+}
75	forwarder.go (new file)
@@ -0,0 +1,75 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package asynq
+
+import (
+	"sync"
+	"time"
+
+	"github.com/hibiken/asynq/internal/base"
+	"github.com/hibiken/asynq/internal/log"
+)
+
+// A forwarder is responsible for moving scheduled and retry tasks to pending state
+// so that the tasks get processed by the workers.
+type forwarder struct {
+	logger *log.Logger
+	broker base.Broker
+
+	// channel to communicate back to the long running "forwarder" goroutine.
+	done chan struct{}
+
+	// list of queue names to check and enqueue.
+	queues []string
+
+	// poll interval on average
+	avgInterval time.Duration
+}
+
+type forwarderParams struct {
+	logger   *log.Logger
+	broker   base.Broker
+	queues   []string
+	interval time.Duration
+}
+
+func newForwarder(params forwarderParams) *forwarder {
+	return &forwarder{
+		logger:      params.logger,
+		broker:      params.broker,
+		done:        make(chan struct{}),
+		queues:      params.queues,
+		avgInterval: params.interval,
+	}
+}
+
+func (f *forwarder) shutdown() {
+	f.logger.Debug("Forwarder shutting down...")
+	// Signal the forwarder goroutine to stop polling.
+	f.done <- struct{}{}
+}
+
+// start starts the "forwarder" goroutine.
+func (f *forwarder) start(wg *sync.WaitGroup) {
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-f.done:
+				f.logger.Debug("Forwarder done")
+				return
+			case <-time.After(f.avgInterval):
+				f.exec()
+			}
+		}
+	}()
+}
+
+func (f *forwarder) exec() {
+	if err := f.broker.ForwardIfReady(f.queues...); err != nil {
+		f.logger.Errorf("Failed to forward scheduled tasks: %v", err)
+	}
+}
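The start/shutdown pairing above is a common Go idiom: a long-running goroutine polls on a timer and exits when a done channel is signaled, while a WaitGroup lets the caller wait for it to finish. A self-contained sketch of the same pattern outside asynq:

	package main

	import (
		"fmt"
		"sync"
		"time"
	)

	type poller struct {
		done     chan struct{}
		interval time.Duration
	}

	func (p *poller) start(wg *sync.WaitGroup) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-p.done:
					return // shutdown was signaled
				case <-time.After(p.interval):
					fmt.Println("tick") // stands in for forwarder.exec()
				}
			}
		}()
	}

	func main() {
		p := &poller{done: make(chan struct{}), interval: 100 * time.Millisecond}
		var wg sync.WaitGroup
		p.start(&wg)
		time.Sleep(350 * time.Millisecond)
		p.done <- struct{}{} // blocks until received, like forwarder.shutdown
		wg.Wait()
	}

Note that the unbuffered send in shutdown doubles as a synchronization point: it cannot complete until the goroutine reaches the select, so returning from shutdown implies the poll loop is exiting.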
137	forwarder_test.go (new file)
@@ -0,0 +1,137 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package asynq
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+	h "github.com/hibiken/asynq/internal/asynqtest"
+	"github.com/hibiken/asynq/internal/base"
+	"github.com/hibiken/asynq/internal/rdb"
+)
+
+func TestForwarder(t *testing.T) {
+	r := setup(t)
+	defer r.Close()
+	rdbClient := rdb.NewRDB(r)
+	const pollInterval = time.Second
+	s := newForwarder(forwarderParams{
+		logger:   testLogger,
+		broker:   rdbClient,
+		queues:   []string{"default", "critical"},
+		interval: pollInterval,
+	})
+	t1 := h.NewTaskMessageWithQueue("gen_thumbnail", nil, "default")
+	t2 := h.NewTaskMessageWithQueue("send_email", nil, "critical")
+	t3 := h.NewTaskMessageWithQueue("reindex", nil, "default")
+	t4 := h.NewTaskMessageWithQueue("sync", nil, "critical")
+	now := time.Now()
+
+	tests := []struct {
+		initScheduled map[string][]base.Z            // scheduled queue initial state
+		initRetry     map[string][]base.Z            // retry queue initial state
+		initPending   map[string][]*base.TaskMessage // default queue initial state
+		wait          time.Duration                  // wait duration before checking for final state
+		wantScheduled map[string][]*base.TaskMessage // schedule queue final state
+		wantRetry     map[string][]*base.TaskMessage // retry queue final state
+		wantPending   map[string][]*base.TaskMessage // default queue final state
+	}{
+		{
+			initScheduled: map[string][]base.Z{
+				"default":  {{Message: t1, Score: now.Add(time.Hour).Unix()}},
+				"critical": {{Message: t2, Score: now.Add(-2 * time.Second).Unix()}},
+			},
+			initRetry: map[string][]base.Z{
+				"default":  {{Message: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()}},
+				"critical": {},
+			},
+			initPending: map[string][]*base.TaskMessage{
+				"default":  {},
+				"critical": {t4},
+			},
+			wait: pollInterval * 2,
+			wantScheduled: map[string][]*base.TaskMessage{
+				"default":  {t1},
+				"critical": {},
+			},
+			wantRetry: map[string][]*base.TaskMessage{
+				"default":  {},
+				"critical": {},
+			},
+			wantPending: map[string][]*base.TaskMessage{
+				"default":  {t3},
+				"critical": {t2, t4},
+			},
+		},
+		{
+			initScheduled: map[string][]base.Z{
+				"default": {
+					{Message: t1, Score: now.Unix()},
+					{Message: t3, Score: now.Add(-500 * time.Millisecond).Unix()},
+				},
+				"critical": {
+					{Message: t2, Score: now.Add(-2 * time.Second).Unix()},
+				},
+			},
+			initRetry: map[string][]base.Z{
+				"default":  {},
+				"critical": {},
+			},
+			initPending: map[string][]*base.TaskMessage{
+				"default":  {},
+				"critical": {t4},
+			},
+			wait: pollInterval * 2,
+			wantScheduled: map[string][]*base.TaskMessage{
+				"default":  {},
+				"critical": {},
+			},
+			wantRetry: map[string][]*base.TaskMessage{
+				"default":  {},
+				"critical": {},
+			},
+			wantPending: map[string][]*base.TaskMessage{
+				"default":  {t1, t3},
+				"critical": {t2, t4},
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		h.FlushDB(t, r)                                  // clean up db before each test case.
+		h.SeedAllScheduledQueues(t, r, tc.initScheduled) // initialize scheduled queue
+		h.SeedAllRetryQueues(t, r, tc.initRetry)         // initialize retry queue
+		h.SeedAllPendingQueues(t, r, tc.initPending)     // initialize default queue
+
+		var wg sync.WaitGroup
+		s.start(&wg)
+		time.Sleep(tc.wait)
+		s.shutdown()
+
+		for qname, want := range tc.wantScheduled {
+			gotScheduled := h.GetScheduledMessages(t, r, qname)
+			if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
+				t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.ScheduledKey(qname), diff)
+			}
+		}
+
+		for qname, want := range tc.wantRetry {
+			gotRetry := h.GetRetryMessages(t, r, qname)
+			if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" {
+				t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.RetryKey(qname), diff)
+			}
+		}
+
+		for qname, want := range tc.wantPending {
+			gotPending := h.GetPendingMessages(t, r, qname)
+			if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
+				t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.PendingKey(qname), diff)
+			}
+		}
+	}
+}
17	go.mod
@@ -1,14 +1,19 @@
 module github.com/hibiken/asynq

-go 1.13
+go 1.14

 require (
-	github.com/go-redis/redis/v7 v7.2.0
-	github.com/google/go-cmp v0.4.0
-	github.com/rs/xid v1.2.1
+	github.com/go-redis/redis/v8 v8.11.2
+	github.com/golang/protobuf v1.4.2
+	github.com/google/go-cmp v0.5.6
+	github.com/google/uuid v1.2.0
+	github.com/kr/pretty v0.1.0 // indirect
+	github.com/robfig/cron/v3 v3.0.1
 	github.com/spf13/cast v1.3.1
+	github.com/stretchr/testify v1.6.1 // indirect
 	go.uber.org/goleak v0.10.0
-	golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e
+	golang.org/x/sys v0.0.0-20210112080510-489259a85091
 	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
-	gopkg.in/yaml.v2 v2.2.7 // indirect
+	google.golang.org/protobuf v1.25.0
+	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
 )
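The jump from go-redis/redis/v7 to v8 in this go.mod is what threads ctx through Client.enqueue and Client.schedule earlier in the diff: in v8 every command method takes a context.Context as its first argument. A sketch of the call-site change with a plain go-redis client (the key and value are arbitrary placeholders):

	// v7: err := rdb.Set("greeting", "hello", 0).Err()
	// v8:
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	ctx := context.Background()
	if err := rdb.Set(ctx, "greeting", "hello", 0).Err(); err != nil {
		log.Fatal(err)
	}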
175	go.sum
@@ -1,74 +1,177 @@
+cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/go-redis/redis/v7 v7.0.0-beta.4 h1:p6z7Pde69EGRWvlC++y8aFcaWegyrKHzOBGo0zUACTQ=
-github.com/go-redis/redis/v7 v7.0.0-beta.4/go.mod h1:xhhSbUMTsleRPur+Vgx9sUHtyN33bdjxY+9/0n9Ig8s=
-github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
-github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/go-redis/redis/v8 v8.11.2 h1:WqlSpAwz8mxDSMCvbyz1Mkiqe0LE5OY4j3lgkvu1Ts0=
+github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
-github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4=
+github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
+github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
 go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
healthcheck.go (new file, 80 lines)
@@ -0,0 +1,80 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
    "sync"
    "time"

    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/log"
)

// healthchecker is responsible for pinging the broker periodically
// and calling the user-provided HealthCheckFunc with the ping result.
type healthchecker struct {
    logger *log.Logger
    broker base.Broker

    // channel to communicate back to the long running "healthchecker" goroutine.
    done chan struct{}

    // interval between healthchecks.
    interval time.Duration

    // function to call periodically.
    healthcheckFunc func(error)
}

type healthcheckerParams struct {
    logger          *log.Logger
    broker          base.Broker
    interval        time.Duration
    healthcheckFunc func(error)
}

func newHealthChecker(params healthcheckerParams) *healthchecker {
    return &healthchecker{
        logger:          params.logger,
        broker:          params.broker,
        done:            make(chan struct{}),
        interval:        params.interval,
        healthcheckFunc: params.healthcheckFunc,
    }
}

func (hc *healthchecker) shutdown() {
    if hc.healthcheckFunc == nil {
        return
    }

    hc.logger.Debug("Healthchecker shutting down...")
    // Signal the healthchecker goroutine to stop.
    hc.done <- struct{}{}
}

func (hc *healthchecker) start(wg *sync.WaitGroup) {
    if hc.healthcheckFunc == nil {
        return
    }

    wg.Add(1)
    go func() {
        defer wg.Done()
        timer := time.NewTimer(hc.interval)
        for {
            select {
            case <-hc.done:
                hc.logger.Debug("Healthchecker done")
                timer.Stop()
                return
            case <-timer.C:
                err := hc.broker.Ping()
                hc.healthcheckFunc(err)
                timer.Reset(hc.interval)
            }
        }
    }()
}
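For context, the healthchecker above is wired up entirely through server configuration. A minimal usage sketch follows, assuming the public asynq.Config fields HealthCheckFunc and HealthCheckInterval that this component serves; check the package docs for the exact names in your version.

package main

import (
    "log"
    "time"

    "github.com/hibiken/asynq"
)

func main() {
    srv := asynq.NewServer(
        asynq.RedisClientOpt{Addr: "localhost:6379"},
        asynq.Config{
            Concurrency: 10,
            // Called with the result of each broker ping; err == nil means healthy.
            HealthCheckFunc: func(err error) {
                if err != nil {
                    log.Printf("asynq: broker health check failed: %v", err)
                }
            },
            // How often the healthchecker pings the broker (assumed field name).
            HealthCheckInterval: 15 * time.Second,
        },
    )
    _ = srv // srv.Run(mux) would start processing; omitted in this sketch.
}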
healthcheck_test.go (new file, 103 lines)
@@ -0,0 +1,103 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
    "sync"
    "testing"
    "time"

    "github.com/hibiken/asynq/internal/rdb"
    "github.com/hibiken/asynq/internal/testbroker"
)

func TestHealthChecker(t *testing.T) {
    r := setup(t)
    defer r.Close()
    rdbClient := rdb.NewRDB(r)

    var (
        // mu guards called and e variables.
        mu     sync.Mutex
        called int
        e      error
    )
    checkFn := func(err error) {
        mu.Lock()
        defer mu.Unlock()
        called++
        e = err
    }

    hc := newHealthChecker(healthcheckerParams{
        logger:          testLogger,
        broker:          rdbClient,
        interval:        1 * time.Second,
        healthcheckFunc: checkFn,
    })

    hc.start(&sync.WaitGroup{})

    time.Sleep(2 * time.Second)

    mu.Lock()
    if called == 0 {
        t.Errorf("Healthchecker did not call the provided HealthCheckFunc")
    }
    if e != nil {
        t.Errorf("HealthCheckFunc was called with non-nil error: %v", e)
    }
    mu.Unlock()

    hc.shutdown()
}

func TestHealthCheckerWhenRedisDown(t *testing.T) {
    // Make sure that healthchecker goroutine doesn't panic
    // if it cannot connect to redis.
    defer func() {
        if r := recover(); r != nil {
            t.Errorf("panic occurred: %v", r)
        }
    }()
    r := rdb.NewRDB(setup(t))
    defer r.Close()
    testBroker := testbroker.NewTestBroker(r)
    var (
        // mu guards called and e variables.
        mu     sync.Mutex
        called int
        e      error
    )
    checkFn := func(err error) {
        mu.Lock()
        defer mu.Unlock()
        called++
        e = err
    }

    hc := newHealthChecker(healthcheckerParams{
        logger:          testLogger,
        broker:          testBroker,
        interval:        1 * time.Second,
        healthcheckFunc: checkFn,
    })

    testBroker.Sleep()
    hc.start(&sync.WaitGroup{})

    time.Sleep(2 * time.Second)

    mu.Lock()
    if called == 0 {
        t.Errorf("Healthchecker did not call the provided HealthCheckFunc")
    }
    if e == nil {
        t.Errorf("HealthCheckFunc was called with nil; want non-nil error")
    }
    mu.Unlock()

    hc.shutdown()
}
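The failure-injection pattern in TestHealthCheckerWhenRedisDown generalizes well: a wrapper broker is toggled into a failing mode and the test asserts the callback observes errors. Below is a minimal sketch of such a wrapper, assuming only a Ping method; the real internal/testbroker wraps the full Broker interface and pairs Sleep with a Wakeup counterpart.

// pingbroker_sketch.go: minimal failure-injection wrapper (illustrative only).
package main

import (
    "errors"
    "fmt"
    "sync"
)

type Pinger interface{ Ping() error }

var errRedisDown = errors.New("testutil: redis is down")

type flakyBroker struct {
    mu       sync.Mutex
    sleeping bool
    real     Pinger
}

// Sleep makes every subsequent call fail, simulating a broker outage.
func (tb *flakyBroker) Sleep() { tb.mu.Lock(); tb.sleeping = true; tb.mu.Unlock() }

// Wakeup restores normal operation.
func (tb *flakyBroker) Wakeup() { tb.mu.Lock(); tb.sleeping = false; tb.mu.Unlock() }

func (tb *flakyBroker) Ping() error {
    tb.mu.Lock()
    defer tb.mu.Unlock()
    if tb.sleeping {
        return errRedisDown
    }
    return tb.real.Ping()
}

type okPinger struct{}

func (okPinger) Ping() error { return nil }

func main() {
    tb := &flakyBroker{real: okPinger{}}
    fmt.Println(tb.Ping()) // <nil>
    tb.Sleep()
    fmt.Println(tb.Ping()) // testutil: redis is down
}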
heartbeat.go
@@ -5,11 +5,14 @@
 package asynq
 
 import (
+    "os"
     "sync"
     "time"
 
+    "github.com/google/uuid"
     "github.com/hibiken/asynq/internal/base"
     "github.com/hibiken/asynq/internal/log"
+    "github.com/hibiken/asynq/internal/timeutil"
 )
 
 // heartbeater is responsible for writing process info to redis periodically to
@@ -17,64 +20,181 @@ import (
 type heartbeater struct {
     logger *log.Logger
     broker base.Broker
+    clock  timeutil.Clock
-    ss *base.ServerState
 
     // channel to communicate back to the long running "heartbeater" goroutine.
     done chan struct{}
 
     // interval between heartbeats.
     interval time.Duration
+
+    // following fields are initialized at construction time and are immutable.
+    host           string
+    pid            int
+    serverID       string
+    concurrency    int
+    queues         map[string]int
+    strictPriority bool
+
+    // following fields are mutable and should be accessed only by the
+    // heartbeater goroutine. In other words, confine these variables
+    // to this goroutine only.
+    started time.Time
+    workers map[string]*workerInfo
+
+    // state is shared with other goroutine but is concurrency safe.
+    state *serverState
+
+    // channels to receive updates on active workers.
+    starting <-chan *workerInfo
+    finished <-chan *base.TaskMessage
 }
 
 type heartbeaterParams struct {
     logger *log.Logger
     broker base.Broker
-    serverState *base.ServerState
     interval time.Duration
+    concurrency int
+    queues map[string]int
+    strictPriority bool
+    state *serverState
+    starting <-chan *workerInfo
+    finished <-chan *base.TaskMessage
 }
 
 func newHeartbeater(params heartbeaterParams) *heartbeater {
+    host, err := os.Hostname()
+    if err != nil {
+        host = "unknown-host"
+    }
+
     return &heartbeater{
         logger: params.logger,
         broker: params.broker,
-        ss:       params.serverState,
+        clock:    timeutil.NewRealClock(),
         done:     make(chan struct{}),
         interval: params.interval,
+
+        host:           host,
+        pid:            os.Getpid(),
+        serverID:       uuid.New().String(),
+        concurrency:    params.concurrency,
+        queues:         params.queues,
+        strictPriority: params.strictPriority,
+
+        state:    params.state,
+        workers:  make(map[string]*workerInfo),
+        starting: params.starting,
+        finished: params.finished,
     }
 }
 
-func (h *heartbeater) terminate() {
+func (h *heartbeater) shutdown() {
     h.logger.Debug("Heartbeater shutting down...")
     // Signal the heartbeater goroutine to stop.
     h.done <- struct{}{}
 }
 
+// A workerInfo holds information about an active worker.
+type workerInfo struct {
+    // the task message the worker is processing.
+    msg *base.TaskMessage
+    // the time the worker has started processing the message.
+    started time.Time
+    // deadline the worker has to finish processing the task by.
+    deadline time.Time
+    // lease the worker holds for the task.
+    lease *base.Lease
+}
 
 func (h *heartbeater) start(wg *sync.WaitGroup) {
-    h.ss.SetStarted(time.Now())
-    h.ss.SetStatus(base.StatusRunning)
     wg.Add(1)
     go func() {
        defer wg.Done()
+       h.started = h.clock.Now()
        h.beat()
+       timer := time.NewTimer(h.interval)
        for {
            select {
            case <-h.done:
-               h.broker.ClearServerState(h.ss)
+               h.broker.ClearServerState(h.host, h.pid, h.serverID)
                h.logger.Debug("Heartbeater done")
+               timer.Stop()
                return
-           case <-time.After(h.interval):
+           case <-timer.C:
                h.beat()
+               timer.Reset(h.interval)
+
+           case w := <-h.starting:
+               h.workers[w.msg.ID] = w
+
+           case msg := <-h.finished:
+               delete(h.workers, msg.ID)
            }
        }
    }()
 }
 
+// beat extends lease for workers and writes server/worker info to redis.
 func (h *heartbeater) beat() {
+    h.state.mu.Lock()
+    srvStatus := h.state.value.String()
+    h.state.mu.Unlock()
+
+    info := base.ServerInfo{
+        Host:              h.host,
+        PID:               h.pid,
+        ServerID:          h.serverID,
+        Concurrency:       h.concurrency,
+        Queues:            h.queues,
+        StrictPriority:    h.strictPriority,
+        Status:            srvStatus,
+        Started:           h.started,
+        ActiveWorkerCount: len(h.workers),
+    }
+
+    var ws []*base.WorkerInfo
+    idsByQueue := make(map[string][]string)
+    for id, w := range h.workers {
+        ws = append(ws, &base.WorkerInfo{
+            Host:     h.host,
+            PID:      h.pid,
+            ServerID: h.serverID,
+            ID:       id,
+            Type:     w.msg.Type,
+            Queue:    w.msg.Queue,
+            Payload:  w.msg.Payload,
+            Started:  w.started,
+            Deadline: w.deadline,
+        })
+        // Check lease before adding to the set to make sure not to extend the lease if the lease is already expired.
+        if w.lease.IsValid() {
+            idsByQueue[w.msg.Queue] = append(idsByQueue[w.msg.Queue], id)
+        } else {
+            w.lease.NotifyExpiration() // notify processor if the lease is expired
+        }
+    }
 
     // Note: Set TTL to be long enough so that it won't expire before we write again
     // and short enough to expire quickly once the process is shut down or killed.
-    err := h.broker.WriteServerState(h.ss, h.interval*2)
-    if err != nil {
-        h.logger.Errorf("could not write heartbeat data: %v", err)
+    if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil {
+        h.logger.Errorf("Failed to write server state data: %v", err)
+    }
+
+    for qname, ids := range idsByQueue {
+        expirationTime, err := h.broker.ExtendLease(qname, ids...)
+        if err != nil {
+            h.logger.Errorf("Failed to extend lease for tasks %v: %v", ids, err)
+            continue
+        }
+        for _, id := range ids {
+            if l := h.workers[id].lease; !l.Reset(expirationTime) {
+                h.logger.Warnf("Lease reset failed for %s; lease deadline: %v", id, l.Deadline())
+            }
+        }
     }
 }
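The lease calls above (IsValid, Reset, NotifyExpiration, Deadline) are the whole contract beat relies on. Below is a minimal sketch of a lease with those semantics, written from the calls visible in this diff rather than from base.Lease itself, so treat names and details as assumptions.

// lease_sketch.go: illustrates the lease contract used by beat(), inferred
// from this diff; base.Lease is the real implementation.
package main

import (
    "fmt"
    "sync"
    "time"
)

type lease struct {
    mu       sync.Mutex
    deadline time.Time
    expireCh chan struct{} // closed once to notify the worker of expiration
    once     sync.Once
}

func newLease(d time.Time) *lease {
    return &lease{deadline: d, expireCh: make(chan struct{})}
}

// IsValid reports whether the lease deadline is still in the future.
func (l *lease) IsValid() bool {
    l.mu.Lock()
    defer l.mu.Unlock()
    return time.Now().Before(l.deadline)
}

// Reset extends the deadline; it returns false once the lease has expired,
// mirroring how beat() refuses to extend an already-expired lease.
func (l *lease) Reset(d time.Time) bool {
    if !l.IsValid() {
        return false
    }
    l.mu.Lock()
    l.deadline = d
    l.mu.Unlock()
    return true
}

// NotifyExpiration tells the worker (e.g. the processor) that its lease is gone.
func (l *lease) NotifyExpiration() { l.once.Do(func() { close(l.expireCh) }) }

func (l *lease) Deadline() time.Time {
    l.mu.Lock()
    defer l.mu.Unlock()
    return l.deadline
}

func main() {
    l := newLease(time.Now().Add(30 * time.Second))
    fmt.Println(l.Reset(time.Now().Add(60 * time.Second))) // true: still valid
    fmt.Println(l.Deadline().After(time.Now()))            // true
}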
heartbeat_test.go
@@ -5,6 +5,7 @@
 package asynq
 
 import (
+    "context"
     "sync"
     "testing"
     "time"
@@ -15,20 +16,143 @@ import (
     "github.com/hibiken/asynq/internal/base"
     "github.com/hibiken/asynq/internal/rdb"
     "github.com/hibiken/asynq/internal/testbroker"
+    "github.com/hibiken/asynq/internal/timeutil"
 )
 
+// Test goes through a few phases.
+//
+// Phase1: Simulate Server startup; Simulate starting tasks listed in startedWorkers
+// Phase2: Simulate finishing tasks listed in finishedTasks
+// Phase3: Simulate Server shutdown;
 func TestHeartbeater(t *testing.T) {
     r := setup(t)
+    defer r.Close()
     rdbClient := rdb.NewRDB(r)
+
+    now := time.Now()
+    const elapsedTime = 10 * time.Second // simulated time elapsed between phase1 and phase2
+
+    clock := timeutil.NewSimulatedClock(time.Time{}) // time will be set in each test
+
+    t1 := h.NewTaskMessageWithQueue("task1", nil, "default")
+    t2 := h.NewTaskMessageWithQueue("task2", nil, "default")
+    t3 := h.NewTaskMessageWithQueue("task3", nil, "default")
+    t4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
+    t5 := h.NewTaskMessageWithQueue("task5", nil, "custom")
+    t6 := h.NewTaskMessageWithQueue("task6", nil, "default")
+
+    // Note: intentionally set to time less than now.Add(rdb.LeaseDuration) to test lease extension is working.
+    lease1 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+    lease2 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+    lease3 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+    lease4 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+    lease5 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+    lease6 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+
     tests := []struct {
+        desc string
+
+        // Interval between heartbeats.
         interval time.Duration
+
+        // Server info.
         host        string
         pid         int
         queues      map[string]int
         concurrency int
+
+        active         map[string][]*base.TaskMessage // initial active set state
+        lease          map[string][]base.Z            // initial lease set state
+        wantLease1     map[string][]base.Z            // expected lease set state after starting all startedWorkers
+        wantLease2     map[string][]base.Z            // expected lease set state after finishing all finishedTasks
+        startedWorkers []*workerInfo                  // workerInfo to send via the started channel
+        finishedTasks  []*base.TaskMessage            // tasks to send via the finished channel
+
+        startTime   time.Time     // simulated start time
+        elapsedTime time.Duration // simulated time elapsed between starting and finishing processing tasks
     }{
-        {time.Second, "localhost", 45678, map[string]int{"default": 1}, 10},
+        {
+            desc:        "With single queue",
+            interval:    2 * time.Second,
+            host:        "localhost",
+            pid:         45678,
+            queues:      map[string]int{"default": 1},
+            concurrency: 10,
+            active: map[string][]*base.TaskMessage{
+                "default": {t1, t2, t3},
+            },
+            lease: map[string][]base.Z{
+                "default": {
+                    {Message: t1, Score: now.Add(10 * time.Second).Unix()},
+                    {Message: t2, Score: now.Add(10 * time.Second).Unix()},
+                    {Message: t3, Score: now.Add(10 * time.Second).Unix()},
+                },
+            },
+            startedWorkers: []*workerInfo{
+                {msg: t1, started: now, deadline: now.Add(2 * time.Minute), lease: lease1},
+                {msg: t2, started: now, deadline: now.Add(2 * time.Minute), lease: lease2},
+                {msg: t3, started: now, deadline: now.Add(2 * time.Minute), lease: lease3},
+            },
+            finishedTasks: []*base.TaskMessage{t1, t2},
+            wantLease1: map[string][]base.Z{
+                "default": {
+                    {Message: t1, Score: now.Add(rdb.LeaseDuration).Unix()},
+                    {Message: t2, Score: now.Add(rdb.LeaseDuration).Unix()},
+                    {Message: t3, Score: now.Add(rdb.LeaseDuration).Unix()},
+                },
+            },
+            wantLease2: map[string][]base.Z{
+                "default": {
+                    {Message: t3, Score: now.Add(elapsedTime).Add(rdb.LeaseDuration).Unix()},
+                },
+            },
+            startTime:   now,
+            elapsedTime: elapsedTime,
+        },
+        {
+            desc:        "With multiple queues",
+            interval:    2 * time.Second,
+            host:        "localhost",
+            pid:         45678,
+            queues:      map[string]int{"default": 1, "custom": 2},
+            concurrency: 10,
+            active: map[string][]*base.TaskMessage{
+                "default": {t6},
+                "custom":  {t4, t5},
+            },
+            lease: map[string][]base.Z{
+                "default": {
+                    {Message: t6, Score: now.Add(10 * time.Second).Unix()},
+                },
+                "custom": {
+                    {Message: t4, Score: now.Add(10 * time.Second).Unix()},
+                    {Message: t5, Score: now.Add(10 * time.Second).Unix()},
+                },
+            },
+            startedWorkers: []*workerInfo{
+                {msg: t6, started: now, deadline: now.Add(2 * time.Minute), lease: lease6},
+                {msg: t4, started: now, deadline: now.Add(2 * time.Minute), lease: lease4},
+                {msg: t5, started: now, deadline: now.Add(2 * time.Minute), lease: lease5},
+            },
+            finishedTasks: []*base.TaskMessage{t6, t5},
+            wantLease1: map[string][]base.Z{
+                "default": {
+                    {Message: t6, Score: now.Add(rdb.LeaseDuration).Unix()},
+                },
+                "custom": {
+                    {Message: t4, Score: now.Add(rdb.LeaseDuration).Unix()},
+                    {Message: t5, Score: now.Add(rdb.LeaseDuration).Unix()},
+                },
+            },
+            wantLease2: map[string][]base.Z{
+                "default": {},
+                "custom": {
+                    {Message: t4, Score: now.Add(elapsedTime).Add(rdb.LeaseDuration).Unix()},
+                },
+            },
+            startTime:   now,
+            elapsedTime: elapsedTime,
+        },
     }
 
     timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond)
@@ -36,76 +160,155 @@ func TestHeartbeater(t *testing.T) {
     ignoreFieldOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID")
     for _, tc := range tests {
         h.FlushDB(t, r)
+        h.SeedAllActiveQueues(t, r, tc.active)
+        h.SeedAllLease(t, r, tc.lease)
 
-        state := base.NewServerState(tc.host, tc.pid, tc.concurrency, tc.queues, false)
+        clock.SetTime(tc.startTime)
+        rdbClient.SetClock(clock)
+
+        srvState := &serverState{}
+        startingCh := make(chan *workerInfo)
+        finishedCh := make(chan *base.TaskMessage)
         hb := newHeartbeater(heartbeaterParams{
             logger: testLogger,
             broker: rdbClient,
-            serverState: state,
             interval: tc.interval,
+            concurrency:    tc.concurrency,
+            queues:         tc.queues,
+            strictPriority: false,
+            state:          srvState,
+            starting:       startingCh,
+            finished:       finishedCh,
         })
+        hb.clock = clock
+
+        // Change host and pid fields for testing purpose.
+        hb.host = tc.host
+        hb.pid = tc.pid
+
+        //===================
+        // Start Phase1
+        //===================
+
+        srvState.mu.Lock()
+        srvState.value = srvStateActive // simulating Server.Start
+        srvState.mu.Unlock()
+
         var wg sync.WaitGroup
         hb.start(&wg)
 
-        want := &base.ServerInfo{
-            Host:        tc.host,
-            PID:         tc.pid,
-            Queues:      tc.queues,
-            Concurrency: tc.concurrency,
-            Started:     time.Now(),
-            Status:      "running",
-        }
+        // Simulate processor starting to work on tasks.
+        for _, w := range tc.startedWorkers {
+            startingCh <- w
+        }
 
-        // allow for heartbeater to write to redis
+        // Wait for heartbeater to write to redis
         time.Sleep(tc.interval * 2)
 
         ss, err := rdbClient.ListServers()
         if err != nil {
-            t.Errorf("could not read server info from redis: %v", err)
+            t.Errorf("%s: could not read server info from redis: %v", tc.desc, err)
-            hb.terminate()
+            hb.shutdown()
             continue
         }
 
         if len(ss) != 1 {
-            t.Errorf("(*RDB).ListServers returned %d process info, want 1", len(ss))
+            t.Errorf("%s: (*RDB).ListServers returned %d server info, want 1", tc.desc, len(ss))
-            hb.terminate()
+            hb.shutdown()
             continue
         }
 
-        if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
-            t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
-            hb.terminate()
+        wantInfo := &base.ServerInfo{
+            Host:              tc.host,
+            PID:               tc.pid,
+            Queues:            tc.queues,
+            Concurrency:       tc.concurrency,
+            Started:           now,
+            Status:            "active",
+            ActiveWorkerCount: len(tc.startedWorkers),
+        }
+        if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
+            t.Errorf("%s: redis stored server status %+v, want %+v; (-want, +got)\n%s", tc.desc, ss[0], wantInfo, diff)
+            hb.shutdown()
             continue
         }
 
-        // status change
-        state.SetStatus(base.StatusStopped)
+        for qname, wantLease := range tc.wantLease1 {
+            gotLease := h.GetLeaseEntries(t, r, qname)
+            if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
+                t.Errorf("%s: mismatch found in %q: (-want,+got):\n%s", tc.desc, base.LeaseKey(qname), diff)
+            }
+        }
 
-        // allow for heartbeater to write to redis
+        for _, w := range tc.startedWorkers {
+            if want := now.Add(rdb.LeaseDuration); w.lease.Deadline() != want {
+                t.Errorf("%s: lease deadline for %v is set to %v, want %v", tc.desc, w.msg, w.lease.Deadline(), want)
+            }
+        }
+
+        //===================
+        // Start Phase2
+        //===================
+
+        clock.AdvanceTime(tc.elapsedTime)
+        // Simulate processor finished processing tasks.
+        for _, msg := range tc.finishedTasks {
+            if err := rdbClient.Done(context.Background(), msg); err != nil {
+                t.Fatalf("RDB.Done failed: %v", err)
+            }
+            finishedCh <- msg
+        }
+        // Wait for heartbeater to write to redis
         time.Sleep(tc.interval * 2)
 
-        want.Status = "stopped"
+        for qname, wantLease := range tc.wantLease2 {
+            gotLease := h.GetLeaseEntries(t, r, qname)
+            if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
+                t.Errorf("%s: mismatch found in %q: (-want,+got):\n%s", tc.desc, base.LeaseKey(qname), diff)
+            }
+        }
+
+        //===================
+        // Start Phase3
+        //===================
+
+        // Server state change; simulating Server.Shutdown
+        srvState.mu.Lock()
+        srvState.value = srvStateClosed
+        srvState.mu.Unlock()
+
+        // Wait for heartbeater to write to redis
+        time.Sleep(tc.interval * 2)
+
+        wantInfo = &base.ServerInfo{
+            Host:              tc.host,
+            PID:               tc.pid,
+            Queues:            tc.queues,
+            Concurrency:       tc.concurrency,
+            Started:           now,
+            Status:            "closed",
+            ActiveWorkerCount: len(tc.startedWorkers) - len(tc.finishedTasks),
+        }
         ss, err = rdbClient.ListServers()
         if err != nil {
-            t.Errorf("could not read process status from redis: %v", err)
+            t.Errorf("%s: could not read server status from redis: %v", tc.desc, err)
-            hb.terminate()
+            hb.shutdown()
             continue
         }
 
         if len(ss) != 1 {
-            t.Errorf("(*RDB).ListProcesses returned %d process info, want 1", len(ss))
+            t.Errorf("%s: (*RDB).ListServers returned %d server info, want 1", tc.desc, len(ss))
-            hb.terminate()
+            hb.shutdown()
             continue
         }
 
-        if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
-            t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
-            hb.terminate()
+        if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
+            t.Errorf("%s: redis stored process status %+v, want %+v; (-want, +got)\n%s", tc.desc, ss[0], wantInfo, diff)
+            hb.shutdown()
             continue
         }
 
-        hb.terminate()
+        hb.shutdown()
     }
 }
@@ -118,13 +321,19 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
         }
     }()
     r := rdb.NewRDB(setup(t))
+    defer r.Close()
     testBroker := testbroker.NewTestBroker(r)
-    ss := base.NewServerState("localhost", 1234, 10, map[string]int{"default": 1}, false)
+    state := &serverState{value: srvStateActive}
     hb := newHeartbeater(heartbeaterParams{
         logger: testLogger,
         broker: testBroker,
-        serverState: ss,
         interval: time.Second,
+        concurrency:    10,
+        queues:         map[string]int{"default": 1},
+        strictPriority: false,
+        state:          state,
+        starting:       make(chan *workerInfo),
+        finished:       make(chan *base.TaskMessage),
     })
 
     testBroker.Sleep()
@@ -134,5 +343,5 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
     // wait for heartbeater to try writing data to redis
     time.Sleep(2 * time.Second)
 
-    hb.terminate()
+    hb.shutdown()
 }
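The test above leans on timeutil.NewSimulatedClock, SetTime, and AdvanceTime to make lease scores deterministic. A minimal sketch of that pattern follows, assuming only the Clock interface implied by h.clock.Now() in this diff; internal/timeutil is the real implementation.

// clock_sketch.go: a minimal simulated clock in the spirit of internal/timeutil,
// written from the calls visible in this diff (Now, SetTime, AdvanceTime).
package main

import (
    "fmt"
    "sync"
    "time"
)

// Clock abstracts time.Now so components can be tested deterministically.
type Clock interface {
    Now() time.Time
}

type realClock struct{}

func (realClock) Now() time.Time { return time.Now() }

type simulatedClock struct {
    mu sync.Mutex
    t  time.Time
}

func (c *simulatedClock) Now() time.Time {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.t
}

// SetTime pins the clock to an absolute instant.
func (c *simulatedClock) SetTime(t time.Time) {
    c.mu.Lock()
    c.t = t
    c.mu.Unlock()
}

// AdvanceTime moves the clock forward without sleeping.
func (c *simulatedClock) AdvanceTime(d time.Duration) {
    c.mu.Lock()
    c.t = c.t.Add(d)
    c.mu.Unlock()
}

func main() {
    c := &simulatedClock{}
    start := time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC)
    c.SetTime(start)
    c.AdvanceTime(10 * time.Second) // mirrors clock.AdvanceTime(tc.elapsedTime)
    fmt.Println(c.Now().Sub(start)) // 10s
}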
inspector.go (new file, 921 lines)
@@ -0,0 +1,921 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
    "fmt"
    "strconv"
    "strings"
    "time"

    "github.com/go-redis/redis/v8"
    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/errors"
    "github.com/hibiken/asynq/internal/rdb"
)

// Inspector is a client interface to inspect and mutate the state of
// queues and tasks.
type Inspector struct {
    rdb *rdb.RDB
}

// NewInspector returns a new instance of Inspector.
func NewInspector(r RedisConnOpt) *Inspector {
    c, ok := r.MakeRedisClient().(redis.UniversalClient)
    if !ok {
        panic(fmt.Sprintf("inspeq: unsupported RedisConnOpt type %T", r))
    }
    return &Inspector{
        rdb: rdb.NewRDB(c),
    }
}

// Close closes the connection with redis.
func (i *Inspector) Close() error {
    return i.rdb.Close()
}
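A short usage sketch for the constructor above; RedisClientOpt is the simplest RedisConnOpt the library ships, and the address is a placeholder.

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer insp.Close()

    qnames, err := insp.Queues()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("queues:", qnames)
}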
// Queues returns a list of all queue names.
func (i *Inspector) Queues() ([]string, error) {
    return i.rdb.AllQueues()
}

// QueueInfo represents the state of a queue at a certain time.
type QueueInfo struct {
    // Name of the queue.
    Queue string

    // Total number of bytes that the queue and its tasks require to be stored in redis.
    // It is an approximate memory usage value in bytes since the value is computed by sampling.
    MemoryUsage int64

    // Latency of the queue, measured by the oldest pending task in the queue.
    Latency time.Duration

    // Size is the total number of tasks in the queue.
    // The value is the sum of Pending, Active, Scheduled, Retry, and Archived.
    Size int

    // Number of pending tasks.
    Pending int
    // Number of active tasks.
    Active int
    // Number of scheduled tasks.
    Scheduled int
    // Number of retry tasks.
    Retry int
    // Number of archived tasks.
    Archived int
    // Number of stored completed tasks.
    Completed int

    // Total number of tasks being processed within the given date (counter resets daily).
    // The number includes both succeeded and failed tasks.
    Processed int
    // Total number of tasks failed to be processed within the given date (counter resets daily).
    Failed int

    // Total number of tasks processed (cumulative).
    ProcessedTotal int
    // Total number of tasks failed (cumulative).
    FailedTotal int

    // Paused indicates whether the queue is paused.
    // If true, tasks in the queue will not be processed.
    Paused bool

    // Time when this queue info snapshot was taken.
    Timestamp time.Time
}

// GetQueueInfo returns current information of the given queue.
func (i *Inspector) GetQueueInfo(qname string) (*QueueInfo, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return nil, err
    }
    stats, err := i.rdb.CurrentStats(qname)
    if err != nil {
        return nil, err
    }
    return &QueueInfo{
        Queue:          stats.Queue,
        MemoryUsage:    stats.MemoryUsage,
        Latency:        stats.Latency,
        Size:           stats.Size,
        Pending:        stats.Pending,
        Active:         stats.Active,
        Scheduled:      stats.Scheduled,
        Retry:          stats.Retry,
        Archived:       stats.Archived,
        Completed:      stats.Completed,
        Processed:      stats.Processed,
        Failed:         stats.Failed,
        ProcessedTotal: stats.ProcessedTotal,
        FailedTotal:    stats.FailedTotal,
        Paused:         stats.Paused,
        Timestamp:      stats.Timestamp,
    }, nil
}
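For example, the snapshot fields read naturally off the returned struct; this sketch just prints the per-state counts whose sum is Size:

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer insp.Close()

    info, err := insp.GetQueueInfo("default")
    if err != nil {
        log.Fatal(err)
    }
    // Size is the sum of the per-state counts below.
    fmt.Printf("size=%d pending=%d active=%d scheduled=%d retry=%d archived=%d completed=%d\n",
        info.Size, info.Pending, info.Active, info.Scheduled, info.Retry, info.Archived, info.Completed)
    fmt.Printf("latency=%v paused=%v\n", info.Latency, info.Paused)
}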
// DailyStats holds aggregate data for a given day for a given queue.
type DailyStats struct {
    // Name of the queue.
    Queue string
    // Total number of tasks being processed during the given date.
    // The number includes both succeeded and failed tasks.
    Processed int
    // Total number of tasks failed to be processed during the given date.
    Failed int
    // Date these stats were taken.
    Date time.Time
}

// History returns a list of stats from the last n days.
func (i *Inspector) History(qname string, n int) ([]*DailyStats, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return nil, err
    }
    stats, err := i.rdb.HistoricalStats(qname, n)
    if err != nil {
        return nil, err
    }
    var res []*DailyStats
    for _, s := range stats {
        res = append(res, &DailyStats{
            Queue:     s.Queue,
            Processed: s.Processed,
            Failed:    s.Failed,
            Date:      s.Time,
        })
    }
    return res, nil
}

var (
    // ErrQueueNotFound indicates that the specified queue does not exist.
    ErrQueueNotFound = errors.New("queue not found")

    // ErrQueueNotEmpty indicates that the specified queue is not empty.
    ErrQueueNotEmpty = errors.New("queue is not empty")

    // ErrTaskNotFound indicates that the specified task cannot be found in the queue.
    ErrTaskNotFound = errors.New("task not found")
)

// DeleteQueue removes the specified queue.
//
// If force is set to true, DeleteQueue will remove the queue regardless of
// the queue size as long as no tasks are active in the queue.
// If force is set to false, DeleteQueue will remove the queue only if
// the queue is empty.
//
// If the specified queue does not exist, DeleteQueue returns ErrQueueNotFound.
// If force is set to false and the specified queue is not empty, DeleteQueue
// returns ErrQueueNotEmpty.
func (i *Inspector) DeleteQueue(qname string, force bool) error {
    err := i.rdb.RemoveQueue(qname, force)
    if errors.IsQueueNotFound(err) {
        return fmt.Errorf("%w: queue=%q", ErrQueueNotFound, qname)
    }
    if errors.IsQueueNotEmpty(err) {
        return fmt.Errorf("%w: queue=%q", ErrQueueNotEmpty, qname)
    }
    return err
}

// GetTaskInfo retrieves task information given a task id and queue name.
//
// Returns an error wrapping ErrQueueNotFound if a queue with the given name doesn't exist.
// Returns an error wrapping ErrTaskNotFound if a task with the given id doesn't exist in the queue.
func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) {
    info, err := i.rdb.GetTaskInfo(qname, id)
    switch {
    case errors.IsQueueNotFound(err):
        return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
    case errors.IsTaskNotFound(err):
        return nil, fmt.Errorf("asynq: %w", ErrTaskNotFound)
    case err != nil:
        return nil, fmt.Errorf("asynq: %v", err)
    }
    return newTaskInfo(info.Message, info.State, info.NextProcessAt, info.Result), nil
}
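Because these helpers wrap the sentinel errors with %w, callers can branch with errors.Is. A brief sketch, assuming the TaskInfo type exported elsewhere in the package exposes a State field:

package main

import (
    "errors"
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer insp.Close()

    info, err := insp.GetTaskInfo("default", "non-existent-id")
    switch {
    case errors.Is(err, asynq.ErrQueueNotFound):
        fmt.Println("no such queue")
    case errors.Is(err, asynq.ErrTaskNotFound):
        fmt.Println("no such task")
    case err != nil:
        log.Fatal(err)
    default:
        fmt.Println("task state:", info.State)
    }
}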
// ListOption specifies behavior of list operation.
type ListOption interface{}

// Internal list option representations.
type (
    pageSizeOpt int
    pageNumOpt  int
)

type listOption struct {
    pageSize int
    pageNum  int
}

const (
    // Page size used by default in list operation.
    defaultPageSize = 30

    // Page number used by default in list operation.
    defaultPageNum = 1
)

func composeListOptions(opts ...ListOption) listOption {
    res := listOption{
        pageSize: defaultPageSize,
        pageNum:  defaultPageNum,
    }
    for _, opt := range opts {
        switch opt := opt.(type) {
        case pageSizeOpt:
            res.pageSize = int(opt)
        case pageNumOpt:
            res.pageNum = int(opt)
        default:
            // ignore unexpected option
        }
    }
    return res
}

// PageSize returns an option to specify the page size for list operation.
//
// Negative page size is treated as zero.
func PageSize(n int) ListOption {
    if n < 0 {
        n = 0
    }
    return pageSizeOpt(n)
}

// Page returns an option to specify the page number for list operation.
// The value 1 fetches the first page.
//
// Negative page number is treated as one.
func Page(n int) ListOption {
    if n < 0 {
        n = 1
    }
    return pageNumOpt(n)
}
// ListPendingTasks retrieves pending tasks from the specified queue.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return nil, fmt.Errorf("asynq: %v", err)
    }
    opt := composeListOptions(opts...)
    pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
    infos, err := i.rdb.ListPending(qname, pgn)
    switch {
    case errors.IsQueueNotFound(err):
        return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
    case err != nil:
        return nil, fmt.Errorf("asynq: %v", err)
    }
    var tasks []*TaskInfo
    for _, i := range infos {
        tasks = append(tasks, newTaskInfo(
            i.Message,
            i.State,
            i.NextProcessAt,
            i.Result,
        ))
    }
    return tasks, err
}

// ListActiveTasks retrieves active tasks from the specified queue.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return nil, fmt.Errorf("asynq: %v", err)
    }
    opt := composeListOptions(opts...)
    pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
    infos, err := i.rdb.ListActive(qname, pgn)
    switch {
    case errors.IsQueueNotFound(err):
        return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
    case err != nil:
        return nil, fmt.Errorf("asynq: %v", err)
    }
    expired, err := i.rdb.ListLeaseExpired(time.Now(), qname)
    if err != nil {
        return nil, fmt.Errorf("asynq: %v", err)
    }
    expiredSet := make(map[string]struct{}) // set of expired message IDs
    for _, msg := range expired {
        expiredSet[msg.ID] = struct{}{}
    }
    var tasks []*TaskInfo
    for _, i := range infos {
        t := newTaskInfo(
            i.Message,
            i.State,
            i.NextProcessAt,
            i.Result,
        )
        if _, ok := expiredSet[i.Message.ID]; ok {
            t.IsOrphaned = true
        }
        tasks = append(tasks, t)
    }
    return tasks, nil
}

// ListScheduledTasks retrieves scheduled tasks from the specified queue.
// Tasks are sorted by NextProcessAt in ascending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return nil, fmt.Errorf("asynq: %v", err)
    }
    opt := composeListOptions(opts...)
    pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
    infos, err := i.rdb.ListScheduled(qname, pgn)
    switch {
    case errors.IsQueueNotFound(err):
        return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
    case err != nil:
        return nil, fmt.Errorf("asynq: %v", err)
    }
    var tasks []*TaskInfo
    for _, i := range infos {
        tasks = append(tasks, newTaskInfo(
            i.Message,
            i.State,
            i.NextProcessAt,
            i.Result,
        ))
    }
    return tasks, nil
}

// ListRetryTasks retrieves retry tasks from the specified queue.
// Tasks are sorted by NextProcessAt in ascending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return nil, fmt.Errorf("asynq: %v", err)
    }
    opt := composeListOptions(opts...)
    pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
    infos, err := i.rdb.ListRetry(qname, pgn)
    switch {
    case errors.IsQueueNotFound(err):
        return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
    case err != nil:
        return nil, fmt.Errorf("asynq: %v", err)
    }
    var tasks []*TaskInfo
    for _, i := range infos {
        tasks = append(tasks, newTaskInfo(
            i.Message,
            i.State,
            i.NextProcessAt,
            i.Result,
        ))
    }
    return tasks, nil
}

// ListArchivedTasks retrieves archived tasks from the specified queue.
// Tasks are sorted by LastFailedAt in descending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return nil, fmt.Errorf("asynq: %v", err)
    }
    opt := composeListOptions(opts...)
    pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
    infos, err := i.rdb.ListArchived(qname, pgn)
    switch {
    case errors.IsQueueNotFound(err):
        return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
    case err != nil:
        return nil, fmt.Errorf("asynq: %v", err)
    }
    var tasks []*TaskInfo
    for _, i := range infos {
        tasks = append(tasks, newTaskInfo(
            i.Message,
            i.State,
            i.NextProcessAt,
            i.Result,
        ))
    }
    return tasks, nil
}

// ListCompletedTasks retrieves completed tasks from the specified queue.
// Tasks are sorted by expiration time (i.e. CompletedAt + Retention) in descending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListCompletedTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return nil, fmt.Errorf("asynq: %v", err)
    }
    opt := composeListOptions(opts...)
    pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
    infos, err := i.rdb.ListCompleted(qname, pgn)
    switch {
    case errors.IsQueueNotFound(err):
        return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
    case err != nil:
        return nil, fmt.Errorf("asynq: %v", err)
    }
    var tasks []*TaskInfo
    for _, i := range infos {
        tasks = append(tasks, newTaskInfo(
            i.Message,
            i.State,
            i.NextProcessAt,
            i.Result,
        ))
    }
    return tasks, nil
}
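Putting the list calls and options together, paging through pending tasks 100 at a time might look like the sketch below; it assumes TaskInfo exposes ID and Type fields, which are defined elsewhere in the package.

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer insp.Close()

    // Page numbering starts at 1; the default page size is 30.
    for page := 1; ; page++ {
        tasks, err := insp.ListPendingTasks("default", asynq.PageSize(100), asynq.Page(page))
        if err != nil {
            log.Fatal(err)
        }
        if len(tasks) == 0 {
            break // ran out of pages
        }
        for _, t := range tasks {
            fmt.Printf("id=%s type=%s\n", t.ID, t.Type)
        }
    }
}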
// DeleteAllPendingTasks deletes all pending tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllPendingTasks(qname string) (int, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return 0, err
    }
    n, err := i.rdb.DeleteAllPendingTasks(qname)
    return int(n), err
}

// DeleteAllScheduledTasks deletes all scheduled tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllScheduledTasks(qname string) (int, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return 0, err
    }
    n, err := i.rdb.DeleteAllScheduledTasks(qname)
    return int(n), err
}

// DeleteAllRetryTasks deletes all retry tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllRetryTasks(qname string) (int, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return 0, err
    }
    n, err := i.rdb.DeleteAllRetryTasks(qname)
    return int(n), err
}

// DeleteAllArchivedTasks deletes all archived tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return 0, err
    }
    n, err := i.rdb.DeleteAllArchivedTasks(qname)
    return int(n), err
}

// DeleteAllCompletedTasks deletes all completed tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllCompletedTasks(qname string) (int, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return 0, err
    }
    n, err := i.rdb.DeleteAllCompletedTasks(qname)
    return int(n), err
}

// DeleteTask deletes a task with the given id from the given queue.
// The task needs to be in pending, scheduled, retry, or archived state,
// otherwise DeleteTask will return an error.
//
// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is in active state, it returns a non-nil error.
func (i *Inspector) DeleteTask(qname, id string) error {
    if err := base.ValidateQueueName(qname); err != nil {
        return fmt.Errorf("asynq: %v", err)
    }
    err := i.rdb.DeleteTask(qname, id)
    switch {
    case errors.IsQueueNotFound(err):
        return fmt.Errorf("asynq: %w", ErrQueueNotFound)
    case errors.IsTaskNotFound(err):
        return fmt.Errorf("asynq: %w", ErrTaskNotFound)
    case err != nil:
        return fmt.Errorf("asynq: %v", err)
    }
    return nil
}
// RunAllScheduledTasks transitions all scheduled tasks to pending state from the given queue,
// and reports the number of tasks transitioned.
func (i *Inspector) RunAllScheduledTasks(qname string) (int, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return 0, err
    }
    n, err := i.rdb.RunAllScheduledTasks(qname)
    return int(n), err
}

// RunAllRetryTasks transitions all retry tasks to pending state from the given queue,
// and reports the number of tasks transitioned.
func (i *Inspector) RunAllRetryTasks(qname string) (int, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return 0, err
    }
    n, err := i.rdb.RunAllRetryTasks(qname)
    return int(n), err
}

// RunAllArchivedTasks transitions all archived tasks to pending state from the given queue,
// and reports the number of tasks transitioned.
func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
    if err := base.ValidateQueueName(qname); err != nil {
        return 0, err
    }
    n, err := i.rdb.RunAllArchivedTasks(qname)
    return int(n), err
}

// RunTask updates the task to pending state given a queue name and task id.
// The task needs to be in scheduled, retry, or archived state, otherwise RunTask
// will return an error.
//
// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is in pending or active state, it returns a non-nil error.
func (i *Inspector) RunTask(qname, id string) error {
    if err := base.ValidateQueueName(qname); err != nil {
        return fmt.Errorf("asynq: %v", err)
    }
    err := i.rdb.RunTask(qname, id)
    switch {
    case errors.IsQueueNotFound(err):
        return fmt.Errorf("asynq: %w", ErrQueueNotFound)
    case errors.IsTaskNotFound(err):
        return fmt.Errorf("asynq: %w", ErrTaskNotFound)
    case err != nil:
        return fmt.Errorf("asynq: %v", err)
    }
    return nil
}
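As a usage note, the Run* helpers are what a dashboard's "retry now" button would call. For instance, moving everything out of the retry set at once:

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer insp.Close()

    // Transition every retry task in "default" back to pending immediately.
    n, err := insp.RunAllRetryTasks("default")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("moved %d tasks back to pending\n", n)
}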
// ArchiveAllPendingTasks archives all pending tasks from the given queue,
|
||||||
|
// and reports the number of tasks archived.
|
||||||
|
func (i *Inspector) ArchiveAllPendingTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.ArchiveAllPendingTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArchiveAllScheduledTasks archives all scheduled tasks from the given queue,
|
||||||
|
// and reports the number of tasks archiveed.
|
||||||
|
func (i *Inspector) ArchiveAllScheduledTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.ArchiveAllScheduledTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArchiveAllRetryTasks archives all retry tasks from the given queue,
|
||||||
|
// and reports the number of tasks archiveed.
|
||||||
|
func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n, err := i.rdb.ArchiveAllRetryTasks(qname)
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArchiveTask archives a task with the given id in the given queue.
|
||||||
|
// The task needs to be in pending, scheduled, or retry state, otherwise ArchiveTask
|
||||||
|
// will return an error.
|
||||||
|
//
|
||||||
|
// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
|
||||||
|
// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
|
||||||
|
// If the task is in already archived, it returns a non-nil error.
|
||||||
|
func (i *Inspector) ArchiveTask(qname, id string) error {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
return fmt.Errorf("asynq: err")
|
||||||
|
}
|
||||||
|
err := i.rdb.ArchiveTask(qname, id)
|
||||||
|
switch {
|
||||||
|
case errors.IsQueueNotFound(err):
|
||||||
|
return fmt.Errorf("asynq: %w", ErrQueueNotFound)
|
||||||
|
case errors.IsTaskNotFound(err):
|
||||||
|
return fmt.Errorf("asynq: %w", ErrTaskNotFound)
|
||||||
|
case err != nil:
|
||||||
|
return fmt.Errorf("asynq: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CancelProcessing sends a signal to cancel processing of the task
|
||||||
|
// given a task id. CancelProcessing is best-effort, which means that it does not
|
||||||
|
// guarantee that the task with the given id will be canceled. The return
|
||||||
|
// value only indicates whether the cancelation signal has been sent.
|
||||||
|
func (i *Inspector) CancelProcessing(id string) error {
|
||||||
|
return i.rdb.PublishCancelation(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PauseQueue pauses task processing on the specified queue.
// If the queue is already paused, it will return a non-nil error.
func (i *Inspector) PauseQueue(qname string) error {
	if err := base.ValidateQueueName(qname); err != nil {
		return err
	}
	return i.rdb.Pause(qname)
}

// UnpauseQueue resumes task processing on the specified queue.
// If the queue is not paused, it will return a non-nil error.
func (i *Inspector) UnpauseQueue(qname string) error {
	if err := base.ValidateQueueName(qname); err != nil {
		return err
	}
	return i.rdb.Unpause(qname)
}

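// Usage sketch (not from this file): a maintenance window that pauses a queue,
// does some work, and resumes it. The queue name and runMigration helper are
// hypothetical.
//
//	if err := inspector.PauseQueue("critical"); err != nil {
//		log.Fatal(err) // already paused, or a Redis error
//	}
//	defer func() {
//		if err := inspector.UnpauseQueue("critical"); err != nil {
//			log.Printf("unpause failed: %v", err)
//		}
//	}()
//	runMigration() // workers stop pulling from "critical" in the meantime
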
// Servers returns a list of running servers' information.
func (i *Inspector) Servers() ([]*ServerInfo, error) {
	servers, err := i.rdb.ListServers()
	if err != nil {
		return nil, err
	}
	workers, err := i.rdb.ListWorkers()
	if err != nil {
		return nil, err
	}
	m := make(map[string]*ServerInfo) // ServerInfo keyed by serverID
	for _, s := range servers {
		m[s.ServerID] = &ServerInfo{
			ID:             s.ServerID,
			Host:           s.Host,
			PID:            s.PID,
			Concurrency:    s.Concurrency,
			Queues:         s.Queues,
			StrictPriority: s.StrictPriority,
			Started:        s.Started,
			Status:         s.Status,
			ActiveWorkers:  make([]*WorkerInfo, 0),
		}
	}
	for _, w := range workers {
		srvInfo, ok := m[w.ServerID]
		if !ok {
			continue
		}
		wrkInfo := &WorkerInfo{
			TaskID:      w.ID,
			TaskType:    w.Type,
			TaskPayload: w.Payload,
			Queue:       w.Queue,
			Started:     w.Started,
			Deadline:    w.Deadline,
		}
		srvInfo.ActiveWorkers = append(srvInfo.ActiveWorkers, wrkInfo)
	}
	var out []*ServerInfo
	for _, srvInfo := range m {
		out = append(out, srvInfo)
	}
	return out, nil
}

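// Usage sketch (not from this file): printing a snapshot of all running
// servers and their busy workers.
//
//	servers, err := inspector.Servers()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, s := range servers {
//		fmt.Printf("%s on %s (pid %d): %s, %d workers busy\n",
//			s.ID, s.Host, s.PID, s.Status, len(s.ActiveWorkers))
//		for _, w := range s.ActiveWorkers {
//			fmt.Printf("  %s task %s from queue %q since %s\n",
//				w.TaskType, w.TaskID, w.Queue, w.Started)
//		}
//	}
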
// ServerInfo describes a running Server instance.
type ServerInfo struct {
	// Unique identifier for the server.
	ID string
	// Host machine on which the server is running.
	Host string
	// PID of the process in which the server is running.
	PID int

	// Server configuration details.
	// See Config doc for field descriptions.
	Concurrency    int
	Queues         map[string]int
	StrictPriority bool

	// Time the server started.
	Started time.Time
	// Status indicates the status of the server.
	// TODO: Update comment with more details.
	Status string
	// A list of active workers currently processing tasks.
	ActiveWorkers []*WorkerInfo
}

// WorkerInfo describes a running worker processing a task.
type WorkerInfo struct {
	// ID of the task the worker is processing.
	TaskID string
	// Type of the task the worker is processing.
	TaskType string
	// Payload of the task the worker is processing.
	TaskPayload []byte
	// Queue from which the worker got its task.
	Queue string
	// Time the worker started processing the task.
	Started time.Time
	// Time the worker needs to finish processing the task by.
	Deadline time.Time
}

// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
func (i *Inspector) ClusterKeySlot(qname string) (int64, error) {
	return i.rdb.ClusterKeySlot(qname)
}

// ClusterNode describes a node in redis cluster.
type ClusterNode struct {
	// Node ID in the cluster.
	ID string

	// Address of the node.
	Addr string
}

// ClusterNodes returns a list of nodes the given queue belongs to.
//
// Only relevant if task queues are stored in redis cluster.
func (i *Inspector) ClusterNodes(qname string) ([]*ClusterNode, error) {
	nodes, err := i.rdb.ClusterNodes(qname)
	if err != nil {
		return nil, err
	}
	var res []*ClusterNode
	for _, node := range nodes {
		res = append(res, &ClusterNode{ID: node.ID, Addr: node.Addr})
	}
	return res, nil
}

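// Usage sketch (not from this file): because every key for a queue carries
// the "asynq:{<qname>}:" hash tag, a queue maps to a single cluster slot;
// these two methods report where that slot lives.
//
//	slot, err := inspector.ClusterKeySlot("default")
//	if err != nil {
//		log.Fatal(err)
//	}
//	nodes, err := inspector.ClusterNodes("default")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("queue %q -> slot %d on %d node(s)\n", "default", slot, len(nodes))
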
// SchedulerEntry holds information about a periodic task registered with a scheduler.
type SchedulerEntry struct {
	// Identifier of this entry.
	ID string

	// Spec describes the schedule of this entry.
	Spec string

	// Periodic task registered for this entry.
	Task *Task

	// Opts is the options for the periodic task.
	Opts []Option

	// Next shows the next time the task will be enqueued.
	Next time.Time

	// Prev shows the last time the task was enqueued.
	// Zero time if task was never enqueued.
	Prev time.Time
}

// SchedulerEntries returns a list of all entries registered with
// currently running schedulers.
func (i *Inspector) SchedulerEntries() ([]*SchedulerEntry, error) {
	var entries []*SchedulerEntry
	res, err := i.rdb.ListSchedulerEntries()
	if err != nil {
		return nil, err
	}
	for _, e := range res {
		task := NewTask(e.Type, e.Payload)
		var opts []Option
		for _, s := range e.Opts {
			if o, err := parseOption(s); err == nil { // ignore bad data
				opts = append(opts, o)
			}
		}
		entries = append(entries, &SchedulerEntry{
			ID:   e.ID,
			Spec: e.Spec,
			Task: task,
			Opts: opts,
			Next: e.Next,
			Prev: e.Prev,
		})
	}
	return entries, nil
}

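// Usage sketch (not from this file): listing periodic entries, e.g. to render
// them in an ops dashboard.
//
//	entries, err := inspector.SchedulerEntries()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, e := range entries {
//		fmt.Printf("%s %q next=%s prev=%s\n",
//			e.Spec, e.Task.Type(), e.Next, e.Prev)
//	}
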
// parseOption interprets a string s as an Option and returns the Option if parsing is successful,
// otherwise returns non-nil error.
func parseOption(s string) (Option, error) {
	fn, arg := parseOptionFunc(s), parseOptionArg(s)
	switch fn {
	case "Queue":
		qname, err := strconv.Unquote(arg)
		if err != nil {
			return nil, err
		}
		return Queue(qname), nil
	case "MaxRetry":
		n, err := strconv.Atoi(arg)
		if err != nil {
			return nil, err
		}
		return MaxRetry(n), nil
	case "Timeout":
		d, err := time.ParseDuration(arg)
		if err != nil {
			return nil, err
		}
		return Timeout(d), nil
	case "Deadline":
		t, err := time.Parse(time.UnixDate, arg)
		if err != nil {
			return nil, err
		}
		return Deadline(t), nil
	case "Unique":
		d, err := time.ParseDuration(arg)
		if err != nil {
			return nil, err
		}
		return Unique(d), nil
	case "ProcessAt":
		t, err := time.Parse(time.UnixDate, arg)
		if err != nil {
			return nil, err
		}
		return ProcessAt(t), nil
	case "ProcessIn":
		d, err := time.ParseDuration(arg)
		if err != nil {
			return nil, err
		}
		return ProcessIn(d), nil
	case "Retention":
		d, err := time.ParseDuration(arg)
		if err != nil {
			return nil, err
		}
		return Retention(d), nil
	default:
		return nil, fmt.Errorf("cannot parse option string %q", s)
	}
}

// parseOptionFunc extracts the option name from a serialized option string
// such as `Queue("low")`. It assumes s contains a "("; otherwise it panics.
func parseOptionFunc(s string) string {
	i := strings.Index(s, "(")
	return s[:i]
}

// parseOptionArg extracts the argument between the first "(" and the first ")"
// in a serialized option string; it returns "" if no well-formed pair is found.
func parseOptionArg(s string) string {
	i := strings.Index(s, "(")
	if i >= 0 {
		j := strings.Index(s, ")")
		if j > i {
			return s[i+1 : j]
		}
	}
	return ""
}

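// Sketch (not from this file): how the two helpers split a serialized option
// string, and what a successful parse yields.
//
//	fn := parseOptionFunc(`MaxRetry(25)`) // "MaxRetry"
//	arg := parseOptionArg(`MaxRetry(25)`) // "25"
//	opt, err := parseOption(`Queue("low")`)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = opt // equivalent to Queue("low")
//	_, _ = fn, arg
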
// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
type SchedulerEnqueueEvent struct {
	// ID of the task that was enqueued.
	TaskID string

	// Time the task was enqueued.
	EnqueuedAt time.Time
}

// ListSchedulerEnqueueEvents retrieves a list of enqueue events from the specified scheduler entry.
//
// By default, it retrieves the first 30 events.
func (i *Inspector) ListSchedulerEnqueueEvents(entryID string, opts ...ListOption) ([]*SchedulerEnqueueEvent, error) {
	opt := composeListOptions(opts...)
	pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
	data, err := i.rdb.ListSchedulerEnqueueEvents(entryID, pgn)
	if err != nil {
		return nil, err
	}
	var events []*SchedulerEnqueueEvent
	for _, e := range data {
		events = append(events, &SchedulerEnqueueEvent{TaskID: e.TaskID, EnqueuedAt: e.EnqueuedAt})
	}
	return events, nil
}
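// Usage sketch (not from this file): paging through enqueue history for one
// entry; entryID would come from SchedulerEntries above.
//
//	events, err := inspector.ListSchedulerEnqueueEvents(entryID,
//		asynq.PageSize(10), asynq.Page(2))
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, e := range events {
//		fmt.Printf("task %s enqueued at %s\n", e.TaskID, e.EnqueuedAt)
//	}
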
inspector_test.go (new file, 3,325 lines): diff suppressed because it is too large.
internal/asynqtest/asynqtest.go
@@ -6,37 +6,43 @@
 package asynqtest
 
 import (
+	"context"
 	"encoding/json"
+	"math"
 	"sort"
 	"testing"
+	"time"
 
-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/google/uuid"
 	"github.com/hibiken/asynq/internal/base"
-	"github.com/rs/xid"
+	"github.com/hibiken/asynq/internal/timeutil"
 )
 
-// ZSetEntry is an entry in redis sorted set.
-type ZSetEntry struct {
-	Msg   *base.TaskMessage
-	Score float64
-}
+// EquateInt64Approx returns a Comparer option that treats int64 values
+// as equal if they are within the given margin.
+func EquateInt64Approx(margin int64) cmp.Option {
+	return cmp.Comparer(func(a, b int64) bool {
+		return math.Abs(float64(a-b)) <= float64(margin)
+	})
+}
 
 // SortMsgOpt is a cmp.Option to sort base.TaskMessage for comparing slice of task messages.
 var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage) []*base.TaskMessage {
 	out := append([]*base.TaskMessage(nil), in...) // Copy input to avoid mutating it
 	sort.Slice(out, func(i, j int) bool {
-		return out[i].ID.String() < out[j].ID.String()
+		return out[i].ID < out[j].ID
 	})
 	return out
 })
 
 // SortZSetEntryOpt is a cmp.Option to sort ZSetEntry for comparing slice of zset entries.
-var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []ZSetEntry) []ZSetEntry {
-	out := append([]ZSetEntry(nil), in...) // Copy input to avoid mutating it
+var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []base.Z) []base.Z {
+	out := append([]base.Z(nil), in...) // Copy input to avoid mutating it
 	sort.Slice(out, func(i, j int) bool {
-		return out[i].Msg.ID.String() < out[j].Msg.ID.String()
+		return out[i].Message.ID < out[j].Message.ID
 	})
 	return out
 })
@@ -57,7 +63,25 @@ var SortServerInfoOpt = cmp.Transformer("SortServerInfo", func(in []*base.Server
 var SortWorkerInfoOpt = cmp.Transformer("SortWorkerInfo", func(in []*base.WorkerInfo) []*base.WorkerInfo {
 	out := append([]*base.WorkerInfo(nil), in...) // Copy input to avoid mutating it
 	sort.Slice(out, func(i, j int) bool {
-		return out[i].ID.String() < out[j].ID.String()
+		return out[i].ID < out[j].ID
+	})
+	return out
+})
+
+// SortSchedulerEntryOpt is a cmp.Option to sort base.SchedulerEntry for comparing slice of entries.
+var SortSchedulerEntryOpt = cmp.Transformer("SortSchedulerEntry", func(in []*base.SchedulerEntry) []*base.SchedulerEntry {
+	out := append([]*base.SchedulerEntry(nil), in...) // Copy input to avoid mutating it
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].Spec < out[j].Spec
+	})
+	return out
+})
+
+// SortSchedulerEnqueueEventOpt is a cmp.Option to sort base.SchedulerEnqueueEvent for comparing slice of events.
+var SortSchedulerEnqueueEventOpt = cmp.Transformer("SortSchedulerEnqueueEvent", func(in []*base.SchedulerEnqueueEvent) []*base.SchedulerEnqueueEvent {
+	out := append([]*base.SchedulerEnqueueEvent(nil), in...)
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].EnqueuedAt.Unix() < out[j].EnqueuedAt.Unix()
 	})
 	return out
 })
@@ -73,33 +97,67 @@ var SortStringSliceOpt = cmp.Transformer("SortStringSlice", func(in []string) []
 var IgnoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID")
 
 // NewTaskMessage returns a new instance of TaskMessage given a task type and payload.
-func NewTaskMessage(taskType string, payload map[string]interface{}) *base.TaskMessage {
-	return &base.TaskMessage{
-		ID:      xid.New(),
-		Type:    taskType,
-		Queue:   base.DefaultQueueName,
-		Retry:   25,
-		Payload: payload,
-	}
+func NewTaskMessage(taskType string, payload []byte) *base.TaskMessage {
+	return NewTaskMessageWithQueue(taskType, payload, base.DefaultQueueName)
 }
 
 // NewTaskMessageWithQueue returns a new instance of TaskMessage given a
 // task type, payload and queue name.
-func NewTaskMessageWithQueue(taskType string, payload map[string]interface{}, qname string) *base.TaskMessage {
+func NewTaskMessageWithQueue(taskType string, payload []byte, qname string) *base.TaskMessage {
 	return &base.TaskMessage{
-		ID:      xid.New(),
+		ID:       uuid.NewString(),
 		Type:     taskType,
 		Queue:    qname,
 		Retry:    25,
 		Payload:  payload,
+		Timeout:  1800, // default timeout of 30 mins
+		Deadline: 0,    // no deadline
 	}
 }
 
+// NewLeaseWithClock returns a new lease with the given expiration time and clock.
+func NewLeaseWithClock(expirationTime time.Time, clock timeutil.Clock) *base.Lease {
+	l := base.NewLease(expirationTime)
+	l.Clock = clock
+	return l
+}
+
+// JSON serializes the given key-value pairs into stream of bytes in JSON.
+func JSON(kv map[string]interface{}) []byte {
+	b, err := json.Marshal(kv)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+// TaskMessageAfterRetry returns an updated copy of t after retry.
+// It increments retry count and sets the error message and last_failed_at time.
+func TaskMessageAfterRetry(t base.TaskMessage, errMsg string, failedAt time.Time) *base.TaskMessage {
+	t.Retried = t.Retried + 1
+	t.ErrorMsg = errMsg
+	t.LastFailedAt = failedAt.Unix()
+	return &t
+}
+
+// TaskMessageWithError returns an updated copy of t with the given error message.
+func TaskMessageWithError(t base.TaskMessage, errMsg string, failedAt time.Time) *base.TaskMessage {
+	t.ErrorMsg = errMsg
+	t.LastFailedAt = failedAt.Unix()
+	return &t
+}
+
+// TaskMessageWithCompletedAt returns an updated copy of t after completion.
+func TaskMessageWithCompletedAt(t base.TaskMessage, completedAt time.Time) *base.TaskMessage {
+	t.CompletedAt = completedAt.Unix()
+	return &t
+}
+
 // MustMarshal marshals given task message and returns a json string.
 // Calling test will fail if marshaling errors out.
 func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
 	tb.Helper()
-	data, err := json.Marshal(msg)
+	data, err := base.EncodeMessage(msg)
 	if err != nil {
 		tb.Fatal(err)
 	}
@@ -110,171 +168,320 @@ func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
 // Calling test will fail if unmarshaling errors out.
 func MustUnmarshal(tb testing.TB, data string) *base.TaskMessage {
 	tb.Helper()
-	var msg base.TaskMessage
-	err := json.Unmarshal([]byte(data), &msg)
+	msg, err := base.DecodeMessage([]byte(data))
 	if err != nil {
 		tb.Fatal(err)
 	}
-	return &msg
+	return msg
 }
 
-// MustMarshalSlice marshals a slice of task messages and return a slice of
-// json strings. Calling test will fail if marshaling errors out.
-func MustMarshalSlice(tb testing.TB, msgs []*base.TaskMessage) []string {
+// FlushDB deletes all the keys of the currently selected DB.
+func FlushDB(tb testing.TB, r redis.UniversalClient) {
 	tb.Helper()
-	var data []string
-	for _, m := range msgs {
-		data = append(data, MustMarshal(tb, m))
+	switch r := r.(type) {
+	case *redis.Client:
+		if err := r.FlushDB(context.Background()).Err(); err != nil {
+			tb.Fatal(err)
+		}
+	case *redis.ClusterClient:
+		err := r.ForEachMaster(context.Background(), func(ctx context.Context, c *redis.Client) error {
+			if err := c.FlushAll(ctx).Err(); err != nil {
+				return err
+			}
+			return nil
+		})
+		if err != nil {
+			tb.Fatal(err)
+		}
 	}
-	return data
 }
 
-// MustUnmarshalSlice unmarshals a slice of strings into a slice of task message structs.
-// Calling test will fail if marshaling errors out.
-func MustUnmarshalSlice(tb testing.TB, data []string) []*base.TaskMessage {
-	tb.Helper()
-	var msgs []*base.TaskMessage
-	for _, s := range data {
-		msgs = append(msgs, MustUnmarshal(tb, s))
-	}
-	return msgs
-}
+// SeedPendingQueue initializes the specified queue with the given messages.
+func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
+	tb.Helper()
+	r.SAdd(context.Background(), base.AllQueues, qname)
+	seedRedisList(tb, r, base.PendingKey(qname), msgs, base.TaskStatePending)
+}
+
+// SeedActiveQueue initializes the active queue with the given messages.
+func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
+	tb.Helper()
+	r.SAdd(context.Background(), base.AllQueues, qname)
+	seedRedisList(tb, r, base.ActiveKey(qname), msgs, base.TaskStateActive)
+}
+
+// SeedScheduledQueue initializes the scheduled queue with the given messages.
+func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+	tb.Helper()
+	r.SAdd(context.Background(), base.AllQueues, qname)
+	seedRedisZSet(tb, r, base.ScheduledKey(qname), entries, base.TaskStateScheduled)
+}
+
+// SeedRetryQueue initializes the retry queue with the given messages.
+func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+	tb.Helper()
+	r.SAdd(context.Background(), base.AllQueues, qname)
+	seedRedisZSet(tb, r, base.RetryKey(qname), entries, base.TaskStateRetry)
+}
+
+// SeedArchivedQueue initializes the archived queue with the given messages.
+func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+	tb.Helper()
+	r.SAdd(context.Background(), base.AllQueues, qname)
+	seedRedisZSet(tb, r, base.ArchivedKey(qname), entries, base.TaskStateArchived)
+}
+
+// SeedLease initializes the lease set with the given entries.
+func SeedLease(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+	tb.Helper()
+	r.SAdd(context.Background(), base.AllQueues, qname)
+	seedRedisZSet(tb, r, base.LeaseKey(qname), entries, base.TaskStateActive)
+}
+
+// SeedCompletedQueue initializes the completed set with the given entries.
+func SeedCompletedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+	tb.Helper()
+	r.SAdd(context.Background(), base.AllQueues, qname)
+	seedRedisZSet(tb, r, base.CompletedKey(qname), entries, base.TaskStateCompleted)
+}
+
+// SeedAllPendingQueues initializes all of the specified queues with the given messages.
+//
+// pending maps a queue name to a list of messages.
+func SeedAllPendingQueues(tb testing.TB, r redis.UniversalClient, pending map[string][]*base.TaskMessage) {
+	tb.Helper()
+	for q, msgs := range pending {
+		SeedPendingQueue(tb, r, msgs, q)
+	}
+}
+
+// SeedAllActiveQueues initializes all of the specified active queues with the given messages.
+func SeedAllActiveQueues(tb testing.TB, r redis.UniversalClient, active map[string][]*base.TaskMessage) {
+	tb.Helper()
+	for q, msgs := range active {
+		SeedActiveQueue(tb, r, msgs, q)
+	}
+}
+
+// SeedAllScheduledQueues initializes all of the specified scheduled queues with the given entries.
+func SeedAllScheduledQueues(tb testing.TB, r redis.UniversalClient, scheduled map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range scheduled {
+		SeedScheduledQueue(tb, r, entries, q)
+	}
+}
+
+// SeedAllRetryQueues initializes all of the specified retry queues with the given entries.
+func SeedAllRetryQueues(tb testing.TB, r redis.UniversalClient, retry map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range retry {
+		SeedRetryQueue(tb, r, entries, q)
+	}
+}
+
+// SeedAllArchivedQueues initializes all of the specified archived queues with the given entries.
+func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range archived {
+		SeedArchivedQueue(tb, r, entries, q)
+	}
+}
+
+// SeedAllLease initializes all of the lease sets with the given entries.
+func SeedAllLease(tb testing.TB, r redis.UniversalClient, lease map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range lease {
+		SeedLease(tb, r, entries, q)
+	}
+}
+
+// SeedAllCompletedQueues initializes all of the completed queues with the given entries.
+func SeedAllCompletedQueues(tb testing.TB, r redis.UniversalClient, completed map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range completed {
+		SeedCompletedQueue(tb, r, entries, q)
+	}
+}
+
+func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
+	msgs []*base.TaskMessage, state base.TaskState) {
+	tb.Helper()
+	for _, msg := range msgs {
+		encoded := MustMarshal(tb, msg)
+		if err := c.LPush(context.Background(), key, msg.ID).Err(); err != nil {
+			tb.Fatal(err)
+		}
+		key := base.TaskKey(msg.Queue, msg.ID)
+		data := map[string]interface{}{
+			"msg":        encoded,
+			"state":      state.String(),
+			"unique_key": msg.UniqueKey,
+		}
+		if err := c.HSet(context.Background(), key, data).Err(); err != nil {
+			tb.Fatal(err)
+		}
+		if len(msg.UniqueKey) > 0 {
+			err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID, 1*time.Minute).Err()
+			if err != nil {
+				tb.Fatalf("Failed to set unique lock in redis: %v", err)
+			}
+		}
+	}
+}
+
+func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
+	items []base.Z, state base.TaskState) {
+	tb.Helper()
+	for _, item := range items {
+		msg := item.Message
+		encoded := MustMarshal(tb, msg)
+		z := &redis.Z{Member: msg.ID, Score: float64(item.Score)}
+		if err := c.ZAdd(context.Background(), key, z).Err(); err != nil {
+			tb.Fatal(err)
+		}
+		key := base.TaskKey(msg.Queue, msg.ID)
+		data := map[string]interface{}{
+			"msg":        encoded,
+			"state":      state.String(),
+			"unique_key": msg.UniqueKey,
+		}
+		if err := c.HSet(context.Background(), key, data).Err(); err != nil {
+			tb.Fatal(err)
+		}
+		if len(msg.UniqueKey) > 0 {
+			err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID, 1*time.Minute).Err()
+			if err != nil {
+				tb.Fatalf("Failed to set unique lock in redis: %v", err)
+			}
+		}
+	}
+}
+
+// GetPendingMessages returns all pending messages in the given queue.
+// It also asserts the state field of the task.
+func GetPendingMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromList(tb, r, qname, base.PendingKey, base.TaskStatePending)
+}
+
+// GetActiveMessages returns all active messages in the given queue.
+// It also asserts the state field of the task.
+func GetActiveMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromList(tb, r, qname, base.ActiveKey, base.TaskStateActive)
+}
+
+// GetScheduledMessages returns all scheduled task messages in the given queue.
+// It also asserts the state field of the task.
+func GetScheduledMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromZSet(tb, r, qname, base.ScheduledKey, base.TaskStateScheduled)
+}
+
+// GetRetryMessages returns all retry messages in the given queue.
+// It also asserts the state field of the task.
+func GetRetryMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromZSet(tb, r, qname, base.RetryKey, base.TaskStateRetry)
+}
+
+// GetArchivedMessages returns all archived messages in the given queue.
+// It also asserts the state field of the task.
+func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromZSet(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
+}
+
+// GetCompletedMessages returns all completed task messages in the given queue.
+// It also asserts the state field of the task.
+func GetCompletedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
+	tb.Helper()
+	return getMessagesFromZSet(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
+}
+
+// GetScheduledEntries returns all scheduled messages and its score in the given queue.
+// It also asserts the state field of the task.
+func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+	tb.Helper()
+	return getMessagesFromZSetWithScores(tb, r, qname, base.ScheduledKey, base.TaskStateScheduled)
+}
+
+// GetRetryEntries returns all retry messages and its score in the given queue.
+// It also asserts the state field of the task.
+func GetRetryEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+	tb.Helper()
+	return getMessagesFromZSetWithScores(tb, r, qname, base.RetryKey, base.TaskStateRetry)
+}
+
+// GetArchivedEntries returns all archived messages and its score in the given queue.
+// It also asserts the state field of the task.
+func GetArchivedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+	tb.Helper()
+	return getMessagesFromZSetWithScores(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
+}
+
+// GetLeaseEntries returns all task IDs and its score in the lease set for the given queue.
+// It also asserts the state field of the task.
+func GetLeaseEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+	tb.Helper()
+	return getMessagesFromZSetWithScores(tb, r, qname, base.LeaseKey, base.TaskStateActive)
+}
+
+// GetCompletedEntries returns all completed messages and its score in the given queue.
+// It also asserts the state field of the task.
+func GetCompletedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+	tb.Helper()
+	return getMessagesFromZSetWithScores(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
+}
+
+// Retrieves all messages stored under `keyFn(qname)` key in redis list.
+func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
+	keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
+	tb.Helper()
+	ids := r.LRange(context.Background(), keyFn(qname), 0, -1).Val()
+	var msgs []*base.TaskMessage
+	for _, id := range ids {
+		taskKey := base.TaskKey(qname, id)
+		data := r.HGet(context.Background(), taskKey, "msg").Val()
+		msgs = append(msgs, MustUnmarshal(tb, data))
+		if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
+			tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
+		}
+	}
+	return msgs
+}
 
-// FlushDB deletes all the keys of the currently selected DB.
-func FlushDB(tb testing.TB, r *redis.Client) {
-	tb.Helper()
-	if err := r.FlushDB().Err(); err != nil {
-		tb.Fatal(err)
-	}
-}
+// Retrieves all messages stored under `keyFn(qname)` key in redis zset (sorted-set).
+func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
+	keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
+	tb.Helper()
+	ids := r.ZRange(context.Background(), keyFn(qname), 0, -1).Val()
+	var msgs []*base.TaskMessage
+	for _, id := range ids {
+		taskKey := base.TaskKey(qname, id)
+		msg := r.HGet(context.Background(), taskKey, "msg").Val()
+		msgs = append(msgs, MustUnmarshal(tb, msg))
+		if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
+			tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
+		}
+	}
+	return msgs
+}
 
-// SeedEnqueuedQueue initializes the specified queue with the given messages.
-//
-// If queue name option is not passed, it defaults to the default queue.
-func SeedEnqueuedQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage, queueOpt ...string) {
-	tb.Helper()
-	queue := base.DefaultQueue
-	if len(queueOpt) > 0 {
-		queue = base.QueueKey(queueOpt[0])
-	}
-	r.SAdd(base.AllQueues, queue)
-	seedRedisList(tb, r, queue, msgs)
-}
-
-// SeedInProgressQueue initializes the in-progress queue with the given messages.
-func SeedInProgressQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage) {
-	tb.Helper()
-	seedRedisList(tb, r, base.InProgressQueue, msgs)
-}
-
-// SeedScheduledQueue initializes the scheduled queue with the given messages.
-func SeedScheduledQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
-	tb.Helper()
-	seedRedisZSet(tb, r, base.ScheduledQueue, entries)
-}
-
-// SeedRetryQueue initializes the retry queue with the given messages.
-func SeedRetryQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
-	tb.Helper()
-	seedRedisZSet(tb, r, base.RetryQueue, entries)
-}
-
-// SeedDeadQueue initializes the dead queue with the given messages.
-func SeedDeadQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
-	tb.Helper()
-	seedRedisZSet(tb, r, base.DeadQueue, entries)
-}
-
-func seedRedisList(tb testing.TB, c *redis.Client, key string, msgs []*base.TaskMessage) {
-	data := MustMarshalSlice(tb, msgs)
-	for _, s := range data {
-		if err := c.LPush(key, s).Err(); err != nil {
-			tb.Fatal(err)
-		}
-	}
-}
-
-func seedRedisZSet(tb testing.TB, c *redis.Client, key string, items []ZSetEntry) {
-	for _, item := range items {
-		z := &redis.Z{Member: MustMarshal(tb, item.Msg), Score: float64(item.Score)}
-		if err := c.ZAdd(key, z).Err(); err != nil {
-			tb.Fatal(err)
-		}
-	}
-}
-
-// GetEnqueuedMessages returns all task messages in the specified queue.
-//
-// If queue name option is not passed, it defaults to the default queue.
-func GetEnqueuedMessages(tb testing.TB, r *redis.Client, queueOpt ...string) []*base.TaskMessage {
-	tb.Helper()
-	queue := base.DefaultQueue
-	if len(queueOpt) > 0 {
-		queue = base.QueueKey(queueOpt[0])
-	}
-	return getListMessages(tb, r, queue)
-}
-
-// GetInProgressMessages returns all task messages in the in-progress queue.
-func GetInProgressMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
-	tb.Helper()
-	return getListMessages(tb, r, base.InProgressQueue)
-}
-
-// GetScheduledMessages returns all task messages in the scheduled queue.
-func GetScheduledMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
-	tb.Helper()
-	return getZSetMessages(tb, r, base.ScheduledQueue)
-}
-
-// GetRetryMessages returns all task messages in the retry queue.
-func GetRetryMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
-	tb.Helper()
-	return getZSetMessages(tb, r, base.RetryQueue)
-}
-
-// GetDeadMessages returns all task messages in the dead queue.
-func GetDeadMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
-	tb.Helper()
-	return getZSetMessages(tb, r, base.DeadQueue)
-}
-
-// GetScheduledEntries returns all task messages and its score in the scheduled queue.
-func GetScheduledEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
-	tb.Helper()
-	return getZSetEntries(tb, r, base.ScheduledQueue)
-}
-
-// GetRetryEntries returns all task messages and its score in the retry queue.
-func GetRetryEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
-	tb.Helper()
-	return getZSetEntries(tb, r, base.RetryQueue)
-}
-
-// GetDeadEntries returns all task messages and its score in the dead queue.
-func GetDeadEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
-	tb.Helper()
-	return getZSetEntries(tb, r, base.DeadQueue)
-}
-
-func getListMessages(tb testing.TB, r *redis.Client, list string) []*base.TaskMessage {
-	data := r.LRange(list, 0, -1).Val()
-	return MustUnmarshalSlice(tb, data)
-}
-
-func getZSetMessages(tb testing.TB, r *redis.Client, zset string) []*base.TaskMessage {
-	data := r.ZRange(zset, 0, -1).Val()
-	return MustUnmarshalSlice(tb, data)
-}
-
-func getZSetEntries(tb testing.TB, r *redis.Client, zset string) []ZSetEntry {
-	data := r.ZRangeWithScores(zset, 0, -1).Val()
-	var entries []ZSetEntry
-	for _, z := range data {
-		entries = append(entries, ZSetEntry{
-			Msg:   MustUnmarshal(tb, z.Member.(string)),
-			Score: z.Score,
-		})
-	}
-	return entries
-}
+// Retrieves all messages along with their scores stored under `keyFn(qname)` key in redis zset (sorted-set).
+func getMessagesFromZSetWithScores(tb testing.TB, r redis.UniversalClient,
+	qname string, keyFn func(qname string) string, state base.TaskState) []base.Z {
+	tb.Helper()
+	zs := r.ZRangeWithScores(context.Background(), keyFn(qname), 0, -1).Val()
+	var res []base.Z
+	for _, z := range zs {
+		taskID := z.Member.(string)
+		taskKey := base.TaskKey(qname, taskID)
+		msg := r.HGet(context.Background(), taskKey, "msg").Val()
+		res = append(res, base.Z{Message: MustUnmarshal(tb, msg), Score: int64(z.Score)})
+		if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
+			tb.Errorf("task (id=%q) is in %q state, want %v", taskID, gotState, state)
+		}
+	}
+	return res
+}
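A quick sketch of how the new helpers compose in a test, written as if inside the asynqtest package (illustrative only; the task type, payload, and DB number are placeholders):

	func TestListPending(t *testing.T) {
		r := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 14})
		FlushDB(t, r) // start from a clean DB
		msg := NewTaskMessageWithQueue("email:send", JSON(map[string]interface{}{"user_id": 42}), "default")
		SeedPendingQueue(t, r, []*base.TaskMessage{msg}, "default")
		got := GetPendingMessages(t, r, "default")
		if len(got) != 1 || got[0].ID != msg.ID {
			t.Errorf("unexpected pending messages: %v", got)
		}
	}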
internal/base/base.go
@@ -7,59 +7,197 @@ package base
 
 import (
 	"context"
+	"crypto/md5"
+	"encoding/hex"
 	"fmt"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/go-redis/redis/v7"
-	"github.com/rs/xid"
+	"github.com/go-redis/redis/v8"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/hibiken/asynq/internal/errors"
+	pb "github.com/hibiken/asynq/internal/proto"
+	"github.com/hibiken/asynq/internal/timeutil"
+	"google.golang.org/protobuf/proto"
 )
 
+// Version of asynq library and CLI.
+const Version = "0.22.0"
+
 // DefaultQueueName is the queue name used if none are specified by user.
 const DefaultQueueName = "default"
 
-// Redis keys
+// DefaultQueue is the redis key for the default queue.
+var DefaultQueue = PendingKey(DefaultQueueName)
+
+// Global Redis keys.
 const (
-	AllServers      = "asynq:servers"                // ZSET
-	serversPrefix   = "asynq:servers:"               // STRING - asynq:ps:<host>:<pid>:<serverid>
-	AllWorkers      = "asynq:workers"                // ZSET
-	workersPrefix   = "asynq:workers:"               // HASH - asynq:workers:<host:<pid>:<serverid>
-	processedPrefix = "asynq:processed:"             // STRING - asynq:processed:<yyyy-mm-dd>
-	failurePrefix   = "asynq:failure:"               // STRING - asynq:failure:<yyyy-mm-dd>
-	QueuePrefix     = "asynq:queues:"                // LIST - asynq:queues:<qname>
-	AllQueues       = "asynq:queues"                 // SET
-	DefaultQueue    = QueuePrefix + DefaultQueueName // LIST
-	ScheduledQueue  = "asynq:scheduled"              // ZSET
-	RetryQueue      = "asynq:retry"                  // ZSET
-	DeadQueue       = "asynq:dead"                   // ZSET
-	InProgressQueue = "asynq:in_progress"            // LIST
-	CancelChannel   = "asynq:cancel"                 // PubSub channel
+	AllServers    = "asynq:servers"    // ZSET
+	AllWorkers    = "asynq:workers"    // ZSET
+	AllSchedulers = "asynq:schedulers" // ZSET
+	AllQueues     = "asynq:queues"     // SET
+	CancelChannel = "asynq:cancel"     // PubSub channel
 )
 
-// QueueKey returns a redis key for the given queue name.
-func QueueKey(qname string) string {
-	return QueuePrefix + strings.ToLower(qname)
-}
+// TaskState denotes the state of a task.
+type TaskState int
+
+const (
+	TaskStateActive TaskState = iota + 1
+	TaskStatePending
+	TaskStateScheduled
+	TaskStateRetry
+	TaskStateArchived
+	TaskStateCompleted
+)
+
+func (s TaskState) String() string {
+	switch s {
+	case TaskStateActive:
+		return "active"
+	case TaskStatePending:
+		return "pending"
+	case TaskStateScheduled:
+		return "scheduled"
+	case TaskStateRetry:
+		return "retry"
+	case TaskStateArchived:
+		return "archived"
+	case TaskStateCompleted:
+		return "completed"
+	}
+	panic(fmt.Sprintf("internal error: unknown task state %d", s))
+}
 
-// ProcessedKey returns a redis key for processed count for the given day.
-func ProcessedKey(t time.Time) string {
-	return processedPrefix + t.UTC().Format("2006-01-02")
-}
+func TaskStateFromString(s string) (TaskState, error) {
+	switch s {
+	case "active":
+		return TaskStateActive, nil
+	case "pending":
+		return TaskStatePending, nil
+	case "scheduled":
+		return TaskStateScheduled, nil
+	case "retry":
+		return TaskStateRetry, nil
+	case "archived":
+		return TaskStateArchived, nil
+	case "completed":
+		return TaskStateCompleted, nil
+	}
+	return 0, errors.E(errors.FailedPrecondition, fmt.Sprintf("%q is not supported task state", s))
+}
 
-// FailureKey returns a redis key for failure count for the given day.
-func FailureKey(t time.Time) string {
-	return failurePrefix + t.UTC().Format("2006-01-02")
-}
+// ValidateQueueName validates a given qname to be used as a queue name.
+// Returns nil if valid, otherwise returns non-nil error.
+func ValidateQueueName(qname string) error {
+	if len(strings.TrimSpace(qname)) == 0 {
+		return fmt.Errorf("queue name must contain one or more characters")
+	}
+	return nil
+}
+
+// QueueKeyPrefix returns a prefix for all keys in the given queue.
+func QueueKeyPrefix(qname string) string {
+	return fmt.Sprintf("asynq:{%s}:", qname)
+}
+
+// TaskKeyPrefix returns a prefix for task key.
+func TaskKeyPrefix(qname string) string {
+	return fmt.Sprintf("%st:", QueueKeyPrefix(qname))
+}
+
+// TaskKey returns a redis key for the given task message.
+func TaskKey(qname, id string) string {
+	return fmt.Sprintf("%s%s", TaskKeyPrefix(qname), id)
+}
+
+// PendingKey returns a redis key for the given queue name.
+func PendingKey(qname string) string {
+	return fmt.Sprintf("%spending", QueueKeyPrefix(qname))
+}
+
+// ActiveKey returns a redis key for the active tasks.
+func ActiveKey(qname string) string {
+	return fmt.Sprintf("%sactive", QueueKeyPrefix(qname))
+}
+
+// ScheduledKey returns a redis key for the scheduled tasks.
+func ScheduledKey(qname string) string {
+	return fmt.Sprintf("%sscheduled", QueueKeyPrefix(qname))
+}
+
+// RetryKey returns a redis key for the retry tasks.
+func RetryKey(qname string) string {
+	return fmt.Sprintf("%sretry", QueueKeyPrefix(qname))
+}
+
+// ArchivedKey returns a redis key for the archived tasks.
+func ArchivedKey(qname string) string {
+	return fmt.Sprintf("%sarchived", QueueKeyPrefix(qname))
+}
+
+// LeaseKey returns a redis key for the lease.
+func LeaseKey(qname string) string {
+	return fmt.Sprintf("%slease", QueueKeyPrefix(qname))
+}
+
+func CompletedKey(qname string) string {
+	return fmt.Sprintf("%scompleted", QueueKeyPrefix(qname))
+}
+
+// PausedKey returns a redis key to indicate that the given queue is paused.
+func PausedKey(qname string) string {
+	return fmt.Sprintf("%spaused", QueueKeyPrefix(qname))
+}
+
+// ProcessedTotalKey returns a redis key for total processed count for the given queue.
+func ProcessedTotalKey(qname string) string {
+	return fmt.Sprintf("%sprocessed", QueueKeyPrefix(qname))
+}
+
+// FailedTotalKey returns a redis key for total failure count for the given queue.
+func FailedTotalKey(qname string) string {
+	return fmt.Sprintf("%sfailed", QueueKeyPrefix(qname))
+}
+
+// ProcessedKey returns a redis key for processed count for the given day for the queue.
+func ProcessedKey(qname string, t time.Time) string {
+	return fmt.Sprintf("%sprocessed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
+}
+
+// FailedKey returns a redis key for failure count for the given day for the queue.
+func FailedKey(qname string, t time.Time) string {
+	return fmt.Sprintf("%sfailed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
+}
 
 // ServerInfoKey returns a redis key for process info.
-func ServerInfoKey(hostname string, pid int, sid string) string {
-	return fmt.Sprintf("%s%s:%d:%s", serversPrefix, hostname, pid, sid)
+func ServerInfoKey(hostname string, pid int, serverID string) string {
+	return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, serverID)
 }
 
 // WorkersKey returns a redis key for the workers given hostname, pid, and server ID.
-func WorkersKey(hostname string, pid int, sid string) string {
-	return fmt.Sprintf("%s%s:%d:%s", workersPrefix, hostname, pid, sid)
+func WorkersKey(hostname string, pid int, serverID string) string {
+	return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, serverID)
+}
+
+// SchedulerEntriesKey returns a redis key for the scheduler entries given scheduler ID.
+func SchedulerEntriesKey(schedulerID string) string {
+	return fmt.Sprintf("asynq:schedulers:{%s}", schedulerID)
+}
+
+// SchedulerHistoryKey returns a redis key for the scheduler's history for the given entry.
+func SchedulerHistoryKey(entryID string) string {
+	return fmt.Sprintf("asynq:scheduler_history:%s", entryID)
+}
+
+// UniqueKey returns a redis key with the given type, payload, and queue name.
+func UniqueKey(qname, tasktype string, payload []byte) string {
+	if payload == nil {
+		return fmt.Sprintf("%sunique:%s:", QueueKeyPrefix(qname), tasktype)
+	}
+	checksum := md5.Sum(payload)
+	return fmt.Sprintf("%sunique:%s:%s", QueueKeyPrefix(qname), tasktype, hex.EncodeToString(checksum[:]))
 }
 
 // TaskMessage is the internal representation of a task with additional metadata fields.
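All of the key helpers above build on QueueKeyPrefix, so every key for a queue shares one "{qname}" hash tag. A sketch of the resulting layout (derived from the functions in this hunk; the queue name and task ID are placeholders):

	base.PendingKey("default")        // "asynq:{default}:pending"
	base.ActiveKey("default")         // "asynq:{default}:active"
	base.TaskKey("default", "abc123") // "asynq:{default}:t:abc123"
	base.ProcessedKey("default", now) // "asynq:{default}:processed:<yyyy-mm-dd>" (UTC date)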
@@ -69,10 +207,10 @@ type TaskMessage struct {
|
|||||||
Type string
|
Type string
|
||||||
|
|
||||||
// Payload holds data needed to process the task.
|
// Payload holds data needed to process the task.
|
||||||
Payload map[string]interface{}
|
Payload []byte
|
||||||
|
|
||||||
// ID is a unique identifier for each task.
|
// ID is a unique identifier for each task.
|
||||||
ID xid.ID
|
ID string
|
||||||
|
|
||||||
// Queue is a name this message should be enqueued to.
|
// Queue is a name this message should be enqueued to.
|
||||||
Queue string
|
Queue string
|
||||||
@@ -86,176 +224,99 @@ type TaskMessage struct {
|
|||||||
// ErrorMsg holds the error message from the last failure.
|
// ErrorMsg holds the error message from the last failure.
|
||||||
ErrorMsg string
|
ErrorMsg string
|
||||||
|
|
||||||
// Timeout specifies how long a task may run.
|
// Time of last failure in Unix time,
|
||||||
// The string value should be compatible with time.Duration.ParseDuration.
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
//
|
//
|
||||||
// Zero means no limit.
|
// Use zero to indicate no last failure
|
||||||
Timeout string
|
LastFailedAt int64
|
||||||
|
|
||||||
// Deadline specifies the deadline for the task.
|
// Timeout specifies timeout in seconds.
|
||||||
// Task won't be processed if it exceeded its deadline.
|
// If task processing doesn't complete within the timeout, the task will be retried
|
||||||
// The string shoulbe be in RFC3339 format.
|
// if retry count is remaining. Otherwise it will be moved to the archive.
|
||||||
//
|
//
|
||||||
// time.Time's zero value means no deadline.
|
// Use zero to indicate no timeout.
|
||||||
Deadline string
|
Timeout int64
|
||||||
|
|
||||||
|
// Deadline specifies the deadline for the task in Unix time,
|
||||||
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
|
// If task processing doesn't complete before the deadline, the task will be retried
|
||||||
|
// if retry count is remaining. Otherwise it will be moved to the archive.
|
||||||
|
//
|
||||||
|
// Use zero to indicate no deadline.
|
||||||
|
Deadline int64
|
||||||
|
|
||||||
// UniqueKey holds the redis key used for uniqueness lock for this task.
|
// UniqueKey holds the redis key used for uniqueness lock for this task.
|
||||||
//
|
//
|
||||||
// Empty string indicates that no uniqueness lock was used.
|
// Empty string indicates that no uniqueness lock was used.
|
||||||
UniqueKey string
|
UniqueKey string
|
||||||
}
|
|
||||||
|
|
||||||
// ServerState holds process level information.
|
// Retention specifies the number of seconds the task should be retained after completion.
|
||||||
|
Retention int64
|
||||||
|
|
||||||
|
// CompletedAt is the time the task was processed successfully in Unix time,
|
||||||
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
//
|
//
|
||||||
// ServerStates are safe for concurrent use by multiple goroutines.
|
// Use zero to indicate no value.
|
||||||
type ServerState struct {
|
CompletedAt int64
|
||||||
mu sync.Mutex // guards all data fields
|
|
||||||
id xid.ID
|
|
||||||
concurrency int
|
|
||||||
queues map[string]int
|
|
||||||
strictPriority bool
|
|
||||||
pid int
|
|
||||||
host string
|
|
||||||
status ServerStatus
|
|
||||||
started time.Time
|
|
||||||
workers map[string]*workerStats
|
|
||||||
}
|
}
|
||||||
|
|
||||||
-// ServerStatus represents status of a server.
-type ServerStatus int
-
-const (
-	// StatusIdle indicates the server is in idle state.
-	StatusIdle ServerStatus = iota
-
-	// StatusRunning indicates the server is up and processing tasks.
-	StatusRunning
-
-	// StatusQuiet indicates the server is up but not processing new tasks.
-	StatusQuiet
-
-	// StatusStopped indicates the server has been stopped.
-	StatusStopped
-)
-
-var statuses = []string{
-	"idle",
-	"running",
-	"quiet",
-	"stopped",
-}
-
-func (s ServerStatus) String() string {
-	if StatusIdle <= s && s <= StatusStopped {
-		return statuses[s]
-	}
-	return "unknown status"
-}
-
-type workerStats struct {
-	msg     *TaskMessage
-	started time.Time
-}
-
-// NewServerState returns a new instance of ServerState.
-func NewServerState(host string, pid, concurrency int, queues map[string]int, strict bool) *ServerState {
-	return &ServerState{
-		host:           host,
-		pid:            pid,
-		id:             xid.New(),
-		concurrency:    concurrency,
-		queues:         cloneQueueConfig(queues),
-		strictPriority: strict,
-		status:         StatusIdle,
-		workers:        make(map[string]*workerStats),
-	}
-}
-
-// SetStatus updates the status of server.
-func (ss *ServerState) SetStatus(status ServerStatus) {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
-	ss.status = status
-}
-
-// Status returns the status of server.
-func (ss *ServerState) Status() ServerStatus {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
-	return ss.status
-}
-
-// SetStarted records when the process started processing.
-func (ss *ServerState) SetStarted(t time.Time) {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
-	ss.started = t
-}
-
-// AddWorkerStats records when a worker started and which task it's processing.
-func (ss *ServerState) AddWorkerStats(msg *TaskMessage, started time.Time) {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
-	ss.workers[msg.ID.String()] = &workerStats{msg, started}
-}
-
-// DeleteWorkerStats removes a worker's entry from the process state.
-func (ss *ServerState) DeleteWorkerStats(msg *TaskMessage) {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
-	delete(ss.workers, msg.ID.String())
-}
-
-// GetInfo returns current state of server as a ServerInfo.
-func (ss *ServerState) GetInfo() *ServerInfo {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
-	return &ServerInfo{
-		Host:              ss.host,
-		PID:               ss.pid,
-		ServerID:          ss.id.String(),
-		Concurrency:       ss.concurrency,
-		Queues:            cloneQueueConfig(ss.queues),
-		StrictPriority:    ss.strictPriority,
-		Status:            ss.status.String(),
-		Started:           ss.started,
-		ActiveWorkerCount: len(ss.workers),
-	}
-}
-
-// GetWorkers returns a list of currently running workers' info.
-func (ss *ServerState) GetWorkers() []*WorkerInfo {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
-	var res []*WorkerInfo
-	for _, w := range ss.workers {
-		res = append(res, &WorkerInfo{
-			Host:    ss.host,
-			PID:     ss.pid,
-			ID:      w.msg.ID,
-			Type:    w.msg.Type,
-			Queue:   w.msg.Queue,
-			Payload: clonePayload(w.msg.Payload),
-			Started: w.started,
-		})
-	}
-	return res
-}
+// EncodeMessage marshals the given task message and returns the encoded bytes.
+func EncodeMessage(msg *TaskMessage) ([]byte, error) {
+	if msg == nil {
+		return nil, fmt.Errorf("cannot encode nil message")
+	}
+	return proto.Marshal(&pb.TaskMessage{
+		Type:         msg.Type,
+		Payload:      msg.Payload,
+		Id:           msg.ID,
+		Queue:        msg.Queue,
+		Retry:        int32(msg.Retry),
+		Retried:      int32(msg.Retried),
+		ErrorMsg:     msg.ErrorMsg,
+		LastFailedAt: msg.LastFailedAt,
+		Timeout:      msg.Timeout,
+		Deadline:     msg.Deadline,
+		UniqueKey:    msg.UniqueKey,
+		Retention:    msg.Retention,
+		CompletedAt:  msg.CompletedAt,
+	})
+}
+
+// DecodeMessage unmarshals the given bytes and returns a decoded task message.
+func DecodeMessage(data []byte) (*TaskMessage, error) {
+	var pbmsg pb.TaskMessage
+	if err := proto.Unmarshal(data, &pbmsg); err != nil {
+		return nil, err
+	}
+	return &TaskMessage{
+		Type:         pbmsg.GetType(),
+		Payload:      pbmsg.GetPayload(),
+		ID:           pbmsg.GetId(),
+		Queue:        pbmsg.GetQueue(),
+		Retry:        int(pbmsg.GetRetry()),
+		Retried:      int(pbmsg.GetRetried()),
+		ErrorMsg:     pbmsg.GetErrorMsg(),
+		LastFailedAt: pbmsg.GetLastFailedAt(),
+		Timeout:      pbmsg.GetTimeout(),
+		Deadline:     pbmsg.GetDeadline(),
+		UniqueKey:    pbmsg.GetUniqueKey(),
+		Retention:    pbmsg.GetRetention(),
+		CompletedAt:  pbmsg.GetCompletedAt(),
+	}, nil
+}
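A quick round-trip sketch for the codec pair above (not part of the diff; the field values are illustrative):

	msg := &TaskMessage{Type: "email:send", ID: "d9a4-uuid", Queue: "default", Retry: 25}
	data, err := EncodeMessage(msg)
	if err != nil {
		log.Fatal(err)
	}
	decoded, err := DecodeMessage(data)
	if err != nil {
		log.Fatal(err)
	}
	// decoded now equals msg field for field.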

-func cloneQueueConfig(qcfg map[string]int) map[string]int {
-	res := make(map[string]int)
-	for qname, n := range qcfg {
-		res[qname] = n
-	}
-	return res
-}
-
-func clonePayload(payload map[string]interface{}) map[string]interface{} {
-	res := make(map[string]interface{})
-	for k, v := range payload {
-		res[k] = v
-	}
-	return res
-}
+// TaskInfo describes a task message and its metadata.
+type TaskInfo struct {
+	Message       *TaskMessage
+	State         TaskState
+	NextProcessAt time.Time
+	Result        []byte
+}
+
+// Z represents sorted set member.
+type Z struct {
+	Message *TaskMessage
+	Score   int64
+}
// ServerInfo holds information about a running server.
@@ -271,18 +332,242 @@ type ServerInfo struct {
	ActiveWorkerCount int
}
+// EncodeServerInfo marshals the given ServerInfo and returns the encoded bytes.
+func EncodeServerInfo(info *ServerInfo) ([]byte, error) {
+	if info == nil {
+		return nil, fmt.Errorf("cannot encode nil server info")
+	}
+	queues := make(map[string]int32)
+	for q, p := range info.Queues {
+		queues[q] = int32(p)
+	}
+	started, err := ptypes.TimestampProto(info.Started)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Marshal(&pb.ServerInfo{
+		Host:              info.Host,
+		Pid:               int32(info.PID),
+		ServerId:          info.ServerID,
+		Concurrency:       int32(info.Concurrency),
+		Queues:            queues,
+		StrictPriority:    info.StrictPriority,
+		Status:            info.Status,
+		StartTime:         started,
+		ActiveWorkerCount: int32(info.ActiveWorkerCount),
+	})
+}
+
+// DecodeServerInfo decodes the given bytes into ServerInfo.
+func DecodeServerInfo(b []byte) (*ServerInfo, error) {
+	var pbmsg pb.ServerInfo
+	if err := proto.Unmarshal(b, &pbmsg); err != nil {
+		return nil, err
+	}
+	queues := make(map[string]int)
+	for q, p := range pbmsg.GetQueues() {
+		queues[q] = int(p)
+	}
+	startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
+	if err != nil {
+		return nil, err
+	}
+	return &ServerInfo{
+		Host:              pbmsg.GetHost(),
+		PID:               int(pbmsg.GetPid()),
+		ServerID:          pbmsg.GetServerId(),
+		Concurrency:       int(pbmsg.GetConcurrency()),
+		Queues:            queues,
+		StrictPriority:    pbmsg.GetStrictPriority(),
+		Status:            pbmsg.GetStatus(),
+		Started:           startTime,
+		ActiveWorkerCount: int(pbmsg.GetActiveWorkerCount()),
+	}, nil
+}
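One maintenance note, not from the diff: the ptypes helpers used above come from the legacy github.com/golang/protobuf module. Under the newer google.golang.org/protobuf module the same conversions are methods on timestamppb.Timestamp:

	ts := timestamppb.New(info.Started) // replaces ptypes.TimestampProto(info.Started)
	if err := ts.CheckValid(); err != nil {
		// handle an out-of-range timestamp
	}
	goTime := ts.AsTime() // replaces ptypes.Timestamp(ts)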
// WorkerInfo holds information about a running worker.
type WorkerInfo struct {
	Host string
	PID  int
-	ID      xid.ID
+	ServerID string
+	ID       string
	Type string
+	Payload []byte
	Queue string
-	Payload map[string]interface{}
	Started time.Time
+	Deadline time.Time
}
-// Cancelations is a collection that holds cancel functions for all in-progress tasks.
+// EncodeWorkerInfo marshals the given WorkerInfo and returns the encoded bytes.
+func EncodeWorkerInfo(info *WorkerInfo) ([]byte, error) {
+	if info == nil {
+		return nil, fmt.Errorf("cannot encode nil worker info")
+	}
+	startTime, err := ptypes.TimestampProto(info.Started)
+	if err != nil {
+		return nil, err
+	}
+	deadline, err := ptypes.TimestampProto(info.Deadline)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Marshal(&pb.WorkerInfo{
+		Host:        info.Host,
+		Pid:         int32(info.PID),
+		ServerId:    info.ServerID,
+		TaskId:      info.ID,
+		TaskType:    info.Type,
+		TaskPayload: info.Payload,
+		Queue:       info.Queue,
+		StartTime:   startTime,
+		Deadline:    deadline,
+	})
+}
+
+// DecodeWorkerInfo decodes the given bytes into WorkerInfo.
+func DecodeWorkerInfo(b []byte) (*WorkerInfo, error) {
+	var pbmsg pb.WorkerInfo
+	if err := proto.Unmarshal(b, &pbmsg); err != nil {
+		return nil, err
+	}
+	startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
+	if err != nil {
+		return nil, err
+	}
+	deadline, err := ptypes.Timestamp(pbmsg.GetDeadline())
+	if err != nil {
+		return nil, err
+	}
+	return &WorkerInfo{
+		Host:     pbmsg.GetHost(),
+		PID:      int(pbmsg.GetPid()),
+		ServerID: pbmsg.GetServerId(),
+		ID:       pbmsg.GetTaskId(),
+		Type:     pbmsg.GetTaskType(),
+		Payload:  pbmsg.GetTaskPayload(),
+		Queue:    pbmsg.GetQueue(),
+		Started:  startTime,
+		Deadline: deadline,
+	}, nil
+}
+
+// SchedulerEntry holds information about a periodic task registered with a scheduler.
+type SchedulerEntry struct {
+	// Identifier of this entry.
+	ID string
+
+	// Spec describes the schedule of this entry.
+	Spec string
+
+	// Type is the task type of the periodic task.
+	Type string
+
+	// Payload is the payload of the periodic task.
+	Payload []byte
+
+	// Opts is the options for the periodic task.
+	Opts []string
+
+	// Next shows the next time the task will be enqueued.
+	Next time.Time
+
+	// Prev shows the last time the task was enqueued.
+	// Zero time if task was never enqueued.
+	Prev time.Time
+}
+
+// EncodeSchedulerEntry marshals the given entry and returns the encoded bytes.
+func EncodeSchedulerEntry(entry *SchedulerEntry) ([]byte, error) {
+	if entry == nil {
+		return nil, fmt.Errorf("cannot encode nil scheduler entry")
+	}
+	next, err := ptypes.TimestampProto(entry.Next)
+	if err != nil {
+		return nil, err
+	}
+	prev, err := ptypes.TimestampProto(entry.Prev)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Marshal(&pb.SchedulerEntry{
+		Id:              entry.ID,
+		Spec:            entry.Spec,
+		TaskType:        entry.Type,
+		TaskPayload:     entry.Payload,
+		EnqueueOptions:  entry.Opts,
+		NextEnqueueTime: next,
+		PrevEnqueueTime: prev,
+	})
+}
+
+// DecodeSchedulerEntry unmarshals the given bytes and returns a decoded SchedulerEntry.
+func DecodeSchedulerEntry(b []byte) (*SchedulerEntry, error) {
+	var pbmsg pb.SchedulerEntry
+	if err := proto.Unmarshal(b, &pbmsg); err != nil {
+		return nil, err
+	}
+	next, err := ptypes.Timestamp(pbmsg.GetNextEnqueueTime())
+	if err != nil {
+		return nil, err
+	}
+	prev, err := ptypes.Timestamp(pbmsg.GetPrevEnqueueTime())
+	if err != nil {
+		return nil, err
+	}
+	return &SchedulerEntry{
+		ID:      pbmsg.GetId(),
+		Spec:    pbmsg.GetSpec(),
+		Type:    pbmsg.GetTaskType(),
+		Payload: pbmsg.GetTaskPayload(),
+		Opts:    pbmsg.GetEnqueueOptions(),
+		Next:    next,
+		Prev:    prev,
+	}, nil
+}
+
+// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
+type SchedulerEnqueueEvent struct {
+	// ID of the task that was enqueued.
+	TaskID string
+
+	// Time the task was enqueued.
+	EnqueuedAt time.Time
+}
+
+// EncodeSchedulerEnqueueEvent marshals the given event
+// and returns the encoded bytes.
+func EncodeSchedulerEnqueueEvent(event *SchedulerEnqueueEvent) ([]byte, error) {
+	if event == nil {
+		return nil, fmt.Errorf("cannot encode nil enqueue event")
+	}
+	enqueuedAt, err := ptypes.TimestampProto(event.EnqueuedAt)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Marshal(&pb.SchedulerEnqueueEvent{
+		TaskId:      event.TaskID,
+		EnqueueTime: enqueuedAt,
+	})
+}
+
+// DecodeSchedulerEnqueueEvent unmarshals the given bytes
+// and returns a decoded SchedulerEnqueueEvent.
+func DecodeSchedulerEnqueueEvent(b []byte) (*SchedulerEnqueueEvent, error) {
+	var pbmsg pb.SchedulerEnqueueEvent
+	if err := proto.Unmarshal(b, &pbmsg); err != nil {
+		return nil, err
+	}
+	enqueuedAt, err := ptypes.Timestamp(pbmsg.GetEnqueueTime())
+	if err != nil {
+		return nil, err
+	}
+	return &SchedulerEnqueueEvent{
+		TaskID:     pbmsg.GetTaskId(),
+		EnqueuedAt: enqueuedAt,
+	}, nil
+}
+
+// Cancelations is a collection that holds cancel functions for all active tasks.
//
// Cancelations are safe for concurrent use by multiple goroutines.
type Cancelations struct {
@@ -319,35 +604,96 @@ func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
	return fn, ok
}

-// GetAll returns all cancel funcs.
-func (c *Cancelations) GetAll() []context.CancelFunc {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	var res []context.CancelFunc
-	for _, fn := range c.cancelFuncs {
-		res = append(res, fn)
-	}
-	return res
-}
+// Lease is a time bound lease for worker to process task.
+// It provides a communication channel between lessor and lessee about lease expiration.
+type Lease struct {
+	once sync.Once
+	ch   chan struct{}
+
+	Clock timeutil.Clock
+
+	mu       sync.Mutex
+	expireAt time.Time // guarded by mu
+}
+
+func NewLease(expirationTime time.Time) *Lease {
+	return &Lease{
+		ch:       make(chan struct{}),
+		expireAt: expirationTime,
+		Clock:    timeutil.NewRealClock(),
+	}
+}
+
+// Reset changes the lease to expire at the given time.
+// It returns true if the lease is still valid and the reset operation was successful, false if the lease had already expired.
+func (l *Lease) Reset(expirationTime time.Time) bool {
+	if !l.IsValid() {
+		return false
+	}
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.expireAt = expirationTime
+	return true
+}
+
+// NotifyExpiration sends a notification to the lessee about an expired lease.
+// It returns true if the notification was sent, false if the lease is still valid and no notification was sent.
+func (l *Lease) NotifyExpiration() bool {
+	if l.IsValid() {
+		return false
+	}
+	l.once.Do(l.closeCh)
+	return true
+}
+
+func (l *Lease) closeCh() {
+	close(l.ch)
+}
+
+// Done returns a communication channel from which the lessee can read to get notified when the lessor notifies about lease expiration.
+func (l *Lease) Done() <-chan struct{} {
+	return l.ch
+}
+
+// Deadline returns the expiration time of the lease.
+func (l *Lease) Deadline() time.Time {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.expireAt
+}
+
+// IsValid returns true if the lease's expiration time is in the future or equal to the current time,
+// returns false otherwise.
+func (l *Lease) IsValid() bool {
+	now := l.Clock.Now()
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.expireAt.After(now) || l.expireAt.Equal(now)
+}
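A small sketch (not part of the diff; newExpireAt and taskDone are hypothetical) of the two roles the comments above describe:

	// Lessor side (e.g. the heartbeater), after extending the lease in Redis:
	if !lease.Reset(newExpireAt) {
		lease.NotifyExpiration() // lease already ran out; wake the lessee
	}

	// Lessee side (the goroutine running the task):
	select {
	case <-lease.Done():
		// lease expired; abandon the task so another server can pick it up
	case <-taskDone:
		// finished before the lease ran out
	}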
// Broker is a message broker that supports operations to manage task queues.
//
// See rdb.RDB as a reference implementation.
type Broker interface {
-	Enqueue(msg *TaskMessage) error
-	EnqueueUnique(msg *TaskMessage, ttl time.Duration) error
-	Dequeue(qnames ...string) (*TaskMessage, error)
-	Done(msg *TaskMessage) error
-	Requeue(msg *TaskMessage) error
-	Schedule(msg *TaskMessage, processAt time.Time) error
-	ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
-	Retry(msg *TaskMessage, processAt time.Time, errMsg string) error
-	Kill(msg *TaskMessage, errMsg string) error
-	RequeueAll() (int64, error)
-	CheckAndEnqueue(qnames ...string) error
-	WriteServerState(ss *ServerState, ttl time.Duration) error
-	ClearServerState(ss *ServerState) error
+	Ping() error
+	Enqueue(ctx context.Context, msg *TaskMessage) error
+	EnqueueUnique(ctx context.Context, msg *TaskMessage, ttl time.Duration) error
+	Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
+	Done(ctx context.Context, msg *TaskMessage) error
+	MarkAsComplete(ctx context.Context, msg *TaskMessage) error
+	Requeue(ctx context.Context, msg *TaskMessage) error
+	Schedule(ctx context.Context, msg *TaskMessage, processAt time.Time) error
+	ScheduleUnique(ctx context.Context, msg *TaskMessage, processAt time.Time, ttl time.Duration) error
+	Retry(ctx context.Context, msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
+	Archive(ctx context.Context, msg *TaskMessage, errMsg string) error
+	ForwardIfReady(qnames ...string) error
+	DeleteExpiredCompletedTasks(qname string) error
+	ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*TaskMessage, error)
+	ExtendLease(qname string, ids ...string) (time.Time, error)
+	WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
+	ClearServerState(host string, pid int, serverID string) error
	CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
	PublishCancelation(id string) error
+	WriteResult(qname, id string, data []byte) (n int, err error)
	Close() error
}
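To make the reshaped interface concrete, here is a hedged sketch of the dequeue/ack loop a processor might run against a Broker; handle, the fixed retry delay, and the error handling are hypothetical stand-ins, not the project's actual processor:

	func runWorker(ctx context.Context, b Broker, handle func(context.Context, *TaskMessage) error, qnames ...string) {
		for {
			msg, leaseDeadline, err := b.Dequeue(qnames...)
			if err != nil {
				time.Sleep(time.Second) // e.g. no processable task; back off briefly
				continue
			}
			taskCtx, cancel := context.WithDeadline(ctx, leaseDeadline)
			if herr := handle(taskCtx, msg); herr != nil {
				// isFailure=true counts this attempt against msg.Retry.
				b.Retry(taskCtx, msg, time.Now().Add(30*time.Second), herr.Error(), true)
			} else {
				b.Done(taskCtx, msg)
			}
			cancel()
		}
	}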
@@ -6,62 +6,240 @@ package base

import (
	"context"
-	"math/rand"
+	"crypto/md5"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
-	"github.com/google/go-cmp/cmp/cmpopts"
-	"github.com/rs/xid"
+	"github.com/google/uuid"
+	"github.com/hibiken/asynq/internal/timeutil"
)
+func TestTaskKey(t *testing.T) {
+	id := uuid.NewString()
+
+	tests := []struct {
+		qname string
+		id    string
+		want  string
+	}{
+		{"default", id, fmt.Sprintf("asynq:{default}:t:%s", id)},
+	}
+
+	for _, tc := range tests {
+		got := TaskKey(tc.qname, tc.id)
+		if got != tc.want {
+			t.Errorf("TaskKey(%q, %s) = %q, want %q", tc.qname, tc.id, got, tc.want)
+		}
+	}
+}
func TestQueueKey(t *testing.T) {
	tests := []struct {
		qname string
		want  string
	}{
-		{"custom", "asynq:queues:custom"},
+		{"default", "asynq:{default}:pending"},
+		{"custom", "asynq:{custom}:pending"},
	}

	for _, tc := range tests {
-		got := QueueKey(tc.qname)
+		got := PendingKey(tc.qname)
		if got != tc.want {
-			t.Errorf("QueueKey(%q) = %q, want %q", tc.qname, got, tc.want)
+			t.Errorf("PendingKey(%q) = %q, want %q", tc.qname, got, tc.want)
		}
	}
}
-func TestProcessedKey(t *testing.T) {
+func TestActiveKey(t *testing.T) {
	tests := []struct {
-		input time.Time
+		qname string
		want  string
	}{
-		{time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:processed:2019-11-14"},
-		{time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:processed:2020-12-01"},
-		{time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:processed:2020-01-06"},
+		{"default", "asynq:{default}:active"},
+		{"custom", "asynq:{custom}:active"},
	}

	for _, tc := range tests {
-		got := ProcessedKey(tc.input)
+		got := ActiveKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("ActiveKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestLeaseKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:lease"},
+		{"custom", "asynq:{custom}:lease"},
+	}
+
+	for _, tc := range tests {
+		got := LeaseKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("LeaseKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestScheduledKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:scheduled"},
+		{"custom", "asynq:{custom}:scheduled"},
+	}
+
+	for _, tc := range tests {
+		got := ScheduledKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("ScheduledKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestRetryKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:retry"},
+		{"custom", "asynq:{custom}:retry"},
+	}
+
+	for _, tc := range tests {
+		got := RetryKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("RetryKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestArchivedKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:archived"},
+		{"custom", "asynq:{custom}:archived"},
+	}
+
+	for _, tc := range tests {
+		got := ArchivedKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("ArchivedKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestCompletedKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:completed"},
+		{"custom", "asynq:{custom}:completed"},
+	}
+
+	for _, tc := range tests {
+		got := CompletedKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("CompletedKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestPausedKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:paused"},
+		{"custom", "asynq:{custom}:paused"},
+	}
+
+	for _, tc := range tests {
+		got := PausedKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("PausedKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestProcessedTotalKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:processed"},
+		{"custom", "asynq:{custom}:processed"},
+	}
+
+	for _, tc := range tests {
+		got := ProcessedTotalKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("ProcessedTotalKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestFailedTotalKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:failed"},
+		{"custom", "asynq:{custom}:failed"},
+	}
+
+	for _, tc := range tests {
+		got := FailedTotalKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("FailedTotalKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestProcessedKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		input time.Time
+		want  string
+	}{
+		{"default", time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:{default}:processed:2019-11-14"},
+		{"critical", time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:{critical}:processed:2020-12-01"},
+		{"default", time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:{default}:processed:2020-01-06"},
+	}
+
+	for _, tc := range tests {
+		got := ProcessedKey(tc.qname, tc.input)
		if got != tc.want {
			t.Errorf("ProcessedKey(%v) = %q, want %q", tc.input, got, tc.want)
		}
	}
}
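Every expected key above now wraps the queue name in braces. These are presumably Redis Cluster hash tags: only the substring inside "{...}" is hashed, so all keys belonging to one queue land in the same hash slot and multi-key Lua scripts remain valid. A key helper consistent with the TestQueueKey expectations (the real implementation lives in base.go, outside this excerpt):

	func PendingKey(qname string) string {
		return fmt.Sprintf("asynq:{%s}:pending", qname)
	}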

-func TestFailureKey(t *testing.T) {
+func TestFailedKey(t *testing.T) {
	tests := []struct {
+		qname string
		input time.Time
		want  string
	}{
-		{time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:failure:2019-11-14"},
-		{time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:failure:2020-12-01"},
-		{time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:failure:2020-01-06"},
+		{"default", time.Date(2019, 11, 14, 10, 30, 1, 1, time.UTC), "asynq:{default}:failed:2019-11-14"},
+		{"custom", time.Date(2020, 12, 1, 1, 0, 1, 1, time.UTC), "asynq:{custom}:failed:2020-12-01"},
+		{"low", time.Date(2020, 1, 6, 15, 02, 1, 1, time.UTC), "asynq:{low}:failed:2020-01-06"},
	}

	for _, tc := range tests {
-		got := FailureKey(tc.input)
+		got := FailedKey(tc.qname, tc.input)
		if got != tc.want {
-			t.Errorf("FailureKey(%v) = %q, want %q", tc.input, got, tc.want)
+			t.Errorf("FailedKey(%q, %v) = %q, want %q", tc.qname, tc.input, got, tc.want)
		}
@@ -75,8 +253,8 @@ func TestServerInfoKey(t *testing.T) {
		sid  string
		want string
	}{
-		{"localhost", 9876, "server123", "asynq:servers:localhost:9876:server123"},
-		{"127.0.0.1", 1234, "server987", "asynq:servers:127.0.0.1:1234:server987"},
+		{"localhost", 9876, "server123", "asynq:servers:{localhost:9876:server123}"},
+		{"127.0.0.1", 1234, "server987", "asynq:servers:{127.0.0.1:1234:server987}"},
	}

	for _, tc := range tests {
@@ -95,8 +273,8 @@ func TestWorkersKey(t *testing.T) {
		sid  string
		want string
	}{
-		{"localhost", 9876, "server1", "asynq:workers:localhost:9876:server1"},
-		{"127.0.0.1", 1234, "server2", "asynq:workers:127.0.0.1:1234:server2"},
+		{"localhost", 9876, "server1", "asynq:workers:{localhost:9876:server1}"},
+		{"127.0.0.1", 1234, "server2", "asynq:workers:{127.0.0.1:1234:server2}"},
	}

	for _, tc := range tests {
@@ -108,68 +286,301 @@ func TestWorkersKey(t *testing.T) {
	}
}

-// Test for server state being accessed by multiple goroutines.
-// Run with -race flag to check for data race.
-func TestServerStateConcurrentAccess(t *testing.T) {
-	ss := NewServerState("127.0.0.1", 1234, 10, map[string]int{"default": 1}, false)
-	var wg sync.WaitGroup
-	started := time.Now()
-	msgs := []*TaskMessage{
-		{ID: xid.New(), Type: "type1", Payload: map[string]interface{}{"user_id": 42}},
-		{ID: xid.New(), Type: "type2"},
-		{ID: xid.New(), Type: "type3"},
-	}
-
-	// Simulate heartbeater calling SetStatus and SetStarted.
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		ss.SetStarted(started)
-		ss.SetStatus(StatusRunning)
-		if status := ss.Status(); status != StatusRunning {
-			t.Errorf("(*ServerState).Status() = %v, want %v", status, StatusRunning)
-		}
-	}()
-
-	// Simulate processor starting worker goroutines.
-	for _, msg := range msgs {
-		wg.Add(1)
-		ss.AddWorkerStats(msg, time.Now())
-		go func(msg *TaskMessage) {
-			defer wg.Done()
-			time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
-			ss.DeleteWorkerStats(msg)
-		}(msg)
-	}
-
-	// Simulate heartbeater calling Get and GetWorkers
-	wg.Add(1)
-	go func() {
-		wg.Done()
-		for i := 0; i < 5; i++ {
-			ss.GetInfo()
-			ss.GetWorkers()
-			time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
-		}
-	}()
-
-	wg.Wait()
-
-	want := &ServerInfo{
-		Host:              "127.0.0.1",
-		PID:               1234,
-		Concurrency:       10,
-		Queues:            map[string]int{"default": 1},
-		StrictPriority:    false,
-		Status:            "running",
-		Started:           started,
-		ActiveWorkerCount: 0,
-	}
-
-	got := ss.GetInfo()
-	if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(ServerInfo{}, "ServerID")); diff != "" {
-		t.Errorf("(*ServerState).GetInfo() = %+v, want %+v; (-want,+got)\n%s",
-			got, want, diff)
-	}
-}
+func TestSchedulerEntriesKey(t *testing.T) {
+	tests := []struct {
+		schedulerID string
+		want        string
+	}{
+		{"localhost:9876:scheduler123", "asynq:schedulers:{localhost:9876:scheduler123}"},
+		{"127.0.0.1:1234:scheduler987", "asynq:schedulers:{127.0.0.1:1234:scheduler987}"},
+	}
+
+	for _, tc := range tests {
+		got := SchedulerEntriesKey(tc.schedulerID)
+		if got != tc.want {
+			t.Errorf("SchedulerEntriesKey(%q) = %q, want %q", tc.schedulerID, got, tc.want)
+		}
+	}
+}
+
+func TestSchedulerHistoryKey(t *testing.T) {
+	tests := []struct {
+		entryID string
+		want    string
+	}{
+		{"entry876", "asynq:scheduler_history:entry876"},
+		{"entry345", "asynq:scheduler_history:entry345"},
+	}
+
+	for _, tc := range tests {
+		got := SchedulerHistoryKey(tc.entryID)
+		if got != tc.want {
+			t.Errorf("SchedulerHistoryKey(%q) = %q, want %q",
+				tc.entryID, got, tc.want)
+		}
+	}
+}
+
+func toBytes(m map[string]interface{}) []byte {
+	b, err := json.Marshal(m)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+func TestUniqueKey(t *testing.T) {
+	payload1 := toBytes(map[string]interface{}{"a": 123, "b": "hello", "c": true})
+	payload2 := toBytes(map[string]interface{}{"b": "hello", "c": true, "a": 123})
+	payload3 := toBytes(map[string]interface{}{
+		"address": map[string]string{"line": "123 Main St", "city": "Boston", "state": "MA"},
+		"names":   []string{"bob", "mike", "rob"}})
+	payload4 := toBytes(map[string]interface{}{
+		"time":     time.Date(2020, time.July, 28, 0, 0, 0, 0, time.UTC),
+		"duration": time.Hour})
+
+	checksum := func(data []byte) string {
+		sum := md5.Sum(data)
+		return hex.EncodeToString(sum[:])
+	}
+	tests := []struct {
+		desc     string
+		qname    string
+		tasktype string
+		payload  []byte
+		want     string
+	}{
+		{
+			"with primitive types",
+			"default",
+			"email:send",
+			payload1,
+			fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload1)),
+		},
+		{
+			"with unsorted keys",
+			"default",
+			"email:send",
+			payload2,
+			fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload2)),
+		},
+		{
+			"with composite types",
+			"default",
+			"email:send",
+			payload3,
+			fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload3)),
+		},
+		{
+			"with complex types",
+			"default",
+			"email:send",
+			payload4,
+			fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload4)),
+		},
+		{
+			"with nil payload",
+			"default",
+			"reindex",
+			nil,
+			"asynq:{default}:unique:reindex:",
+		},
+	}
+
+	for _, tc := range tests {
+		got := UniqueKey(tc.qname, tc.tasktype, tc.payload)
+		if got != tc.want {
+			t.Errorf("%s: UniqueKey(%q, %q, %v) = %q, want %q", tc.desc, tc.qname, tc.tasktype, tc.payload, got, tc.want)
+		}
+	}
+}
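The cases above pin down the unique-key format: asynq:{<qname>}:unique:<tasktype>:<md5 of payload>, with an empty checksum suffix for a nil payload. A sketch of a UniqueKey implementation consistent with those expectations (the real one lives in base.go, outside this excerpt):

	func UniqueKey(qname, tasktype string, payload []byte) string {
		if payload == nil {
			return fmt.Sprintf("asynq:{%s}:unique:%s:", qname, tasktype)
		}
		checksum := md5.Sum(payload)
		return fmt.Sprintf("asynq:{%s}:unique:%s:%s", qname, tasktype, hex.EncodeToString(checksum[:]))
	}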
+func TestMessageEncoding(t *testing.T) {
+	id := uuid.NewString()
+	tests := []struct {
+		in  *TaskMessage
+		out *TaskMessage
+	}{
+		{
+			in: &TaskMessage{
+				Type:      "task1",
+				Payload:   toBytes(map[string]interface{}{"a": 1, "b": "hello!", "c": true}),
+				ID:        id,
+				Queue:     "default",
+				Retry:     10,
+				Retried:   0,
+				Timeout:   1800,
+				Deadline:  1692311100,
+				Retention: 3600,
+			},
+			out: &TaskMessage{
+				Type:      "task1",
+				Payload:   toBytes(map[string]interface{}{"a": json.Number("1"), "b": "hello!", "c": true}),
+				ID:        id,
+				Queue:     "default",
+				Retry:     10,
+				Retried:   0,
+				Timeout:   1800,
+				Deadline:  1692311100,
+				Retention: 3600,
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		encoded, err := EncodeMessage(tc.in)
+		if err != nil {
+			t.Errorf("EncodeMessage(msg) returned error: %v", err)
+			continue
+		}
+		decoded, err := DecodeMessage(encoded)
+		if err != nil {
+			t.Errorf("DecodeMessage(encoded) returned error: %v", err)
+			continue
+		}
+		if diff := cmp.Diff(tc.out, decoded); diff != "" {
+			t.Errorf("Decoded message == %+v, want %+v;(-want,+got)\n%s",
+				decoded, tc.out, diff)
+		}
+	}
+}
+
+func TestServerInfoEncoding(t *testing.T) {
+	tests := []struct {
+		info ServerInfo
+	}{
+		{
+			info: ServerInfo{
+				Host:              "127.0.0.1",
+				PID:               9876,
+				ServerID:          "abc123",
+				Concurrency:       10,
+				Queues:            map[string]int{"default": 1, "critical": 2},
+				StrictPriority:    false,
+				Status:            "active",
+				Started:           time.Now().Add(-3 * time.Hour),
+				ActiveWorkerCount: 8,
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		encoded, err := EncodeServerInfo(&tc.info)
+		if err != nil {
+			t.Errorf("EncodeServerInfo(info) returned error: %v", err)
+			continue
+		}
+		decoded, err := DecodeServerInfo(encoded)
+		if err != nil {
+			t.Errorf("DecodeServerInfo(encoded) returned error: %v", err)
+			continue
+		}
+		if diff := cmp.Diff(&tc.info, decoded); diff != "" {
+			t.Errorf("Decoded ServerInfo == %+v, want %+v;(-want,+got)\n%s",
+				decoded, tc.info, diff)
+		}
+	}
+}
+
+func TestWorkerInfoEncoding(t *testing.T) {
+	tests := []struct {
+		info WorkerInfo
+	}{
+		{
+			info: WorkerInfo{
+				Host:     "127.0.0.1",
+				PID:      9876,
+				ServerID: "abc123",
+				ID:       uuid.NewString(),
+				Type:     "taskA",
+				Payload:  toBytes(map[string]interface{}{"foo": "bar"}),
+				Queue:    "default",
+				Started:  time.Now().Add(-3 * time.Hour),
+				Deadline: time.Now().Add(30 * time.Second),
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		encoded, err := EncodeWorkerInfo(&tc.info)
+		if err != nil {
+			t.Errorf("EncodeWorkerInfo(info) returned error: %v", err)
+			continue
+		}
+		decoded, err := DecodeWorkerInfo(encoded)
+		if err != nil {
+			t.Errorf("DecodeWorkerInfo(encoded) returned error: %v", err)
+			continue
+		}
+		if diff := cmp.Diff(&tc.info, decoded); diff != "" {
+			t.Errorf("Decoded WorkerInfo == %+v, want %+v;(-want,+got)\n%s",
+				decoded, tc.info, diff)
+		}
+	}
+}
+
+func TestSchedulerEntryEncoding(t *testing.T) {
+	tests := []struct {
+		entry SchedulerEntry
+	}{
+		{
+			entry: SchedulerEntry{
+				ID:      uuid.NewString(),
+				Spec:    "* * * * *",
+				Type:    "task_A",
+				Payload: toBytes(map[string]interface{}{"foo": "bar"}),
+				Opts:    []string{"Queue('email')"},
+				Next:    time.Now().Add(30 * time.Second).UTC(),
+				Prev:    time.Now().Add(-2 * time.Minute).UTC(),
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		encoded, err := EncodeSchedulerEntry(&tc.entry)
+		if err != nil {
+			t.Errorf("EncodeSchedulerEntry(entry) returned error: %v", err)
+			continue
+		}
+		decoded, err := DecodeSchedulerEntry(encoded)
+		if err != nil {
+			t.Errorf("DecodeSchedulerEntry(encoded) returned error: %v", err)
+			continue
+		}
+		if diff := cmp.Diff(&tc.entry, decoded); diff != "" {
+			t.Errorf("Decoded SchedulerEntry == %+v, want %+v;(-want,+got)\n%s",
+				decoded, tc.entry, diff)
+		}
+	}
+}
+
+func TestSchedulerEnqueueEventEncoding(t *testing.T) {
+	tests := []struct {
+		event SchedulerEnqueueEvent
+	}{
+		{
+			event: SchedulerEnqueueEvent{
+				TaskID:     uuid.NewString(),
+				EnqueuedAt: time.Now().Add(-30 * time.Second).UTC(),
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		encoded, err := EncodeSchedulerEnqueueEvent(&tc.event)
+		if err != nil {
+			t.Errorf("EncodeSchedulerEnqueueEvent(event) returned error: %v", err)
+			continue
+		}
+		decoded, err := DecodeSchedulerEnqueueEvent(encoded)
+		if err != nil {
+			t.Errorf("DecodeSchedulerEnqueueEvent(encoded) returned error: %v", err)
+			continue
+		}
+		if diff := cmp.Diff(&tc.event, decoded); diff != "" {
+			t.Errorf("Decoded SchedulerEnqueueEvent == %+v, want %+v;(-want,+got)\n%s",
+				decoded, tc.event, diff)
+		}
+	}
+}
@@ -216,9 +627,76 @@ func TestCancelationsConcurrentAccess(t *testing.T) {
	if ok {
		t.Errorf("(*Cancelations).Get(%q) = _, true, want <nil>, false", key2)
	}
-
-	funcs := c.GetAll()
-	if len(funcs) != 2 {
-		t.Errorf("(*Cancelations).GetAll() returns %d functions, want 2", len(funcs))
-	}
}
+
+func TestLeaseReset(t *testing.T) {
+	now := time.Now()
+	clock := timeutil.NewSimulatedClock(now)
+
+	l := NewLease(now.Add(30 * time.Second))
+	l.Clock = clock
+
+	// Check initial state
+	if !l.IsValid() {
+		t.Errorf("lease should be valid when expiration is set to a future time")
+	}
+	if want := now.Add(30 * time.Second); l.Deadline() != want {
+		t.Errorf("Lease.Deadline() = %v, want %v", l.Deadline(), want)
+	}
+
+	// Test Reset
+	if !l.Reset(now.Add(45 * time.Second)) {
+		t.Fatalf("Lease.Reset returned false when extending")
+	}
+	if want := now.Add(45 * time.Second); l.Deadline() != want {
+		t.Errorf("After Reset: Lease.Deadline() = %v, want %v", l.Deadline(), want)
+	}
+
+	clock.AdvanceTime(1 * time.Minute) // simulate lease expiration
+
+	if l.IsValid() {
+		t.Errorf("lease should be invalid after expiration")
+	}
+
+	// Reset should return false if lease is expired.
+	if l.Reset(time.Now().Add(20 * time.Second)) {
+		t.Errorf("Lease.Reset should return false after expiration")
+	}
+}
+
+func TestLeaseNotifyExpiration(t *testing.T) {
+	now := time.Now()
+	clock := timeutil.NewSimulatedClock(now)
+
+	l := NewLease(now.Add(30 * time.Second))
+	l.Clock = clock
+
+	select {
+	case <-l.Done():
+		t.Fatalf("Lease.Done() did not block")
+	default:
+	}
+
+	if l.NotifyExpiration() {
+		t.Fatalf("Lease.NotifyExpiration() should return false when lease is still valid")
+	}
+
+	clock.AdvanceTime(1 * time.Minute) // simulate lease expiration
+
+	if l.IsValid() {
+		t.Errorf("Lease should be invalid after expiration")
+	}
+	if !l.NotifyExpiration() {
+		t.Errorf("Lease.NotifyExpiration() should return true after expiration")
+	}
+	if !l.NotifyExpiration() {
+		t.Errorf("It should be legal to call Lease.NotifyExpiration multiple times")
+	}
+
+	select {
+	case <-l.Done():
+		// expected
+	default:
+		t.Errorf("Lease.Done() blocked after call to Lease.NotifyExpiration()")
+	}
+}
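The lease tests above drive expiry with timeutil's simulated clock instead of sleeping. A minimal sketch of what such a clock needs to support (the real implementation lives in internal/timeutil and may differ):

	// Hypothetical sketch of a simulated clock satisfying timeutil.Clock.
	type SimulatedClock struct {
		mu sync.Mutex
		t  time.Time
	}

	func (c *SimulatedClock) Now() time.Time {
		c.mu.Lock()
		defer c.mu.Unlock()
		return c.t
	}

	func (c *SimulatedClock) AdvanceTime(d time.Duration) {
		c.mu.Lock()
		defer c.mu.Unlock()
		c.t = c.t.Add(d)
	}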
87	internal/context/context.go	Normal file
@@ -0,0 +1,87 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package context

import (
	"context"
	"time"

	"github.com/hibiken/asynq/internal/base"
)

// A taskMetadata holds task scoped data to put in context.
type taskMetadata struct {
	id         string
	maxRetry   int
	retryCount int
	qname      string
}

// ctxKey type is unexported to prevent collisions with context keys defined in
// other packages.
type ctxKey int

// metadataCtxKey is the context key for the task metadata.
// Its value of zero is arbitrary.
const metadataCtxKey ctxKey = 0

// New returns a context and cancel function for a given task message.
func New(base context.Context, msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
	metadata := taskMetadata{
		id:         msg.ID,
		maxRetry:   msg.Retry,
		retryCount: msg.Retried,
		qname:      msg.Queue,
	}
	ctx := context.WithValue(base, metadataCtxKey, metadata)
	return context.WithDeadline(ctx, deadline)
}

// GetTaskID extracts a task ID from a context, if any.
//
// ID of a task is guaranteed to be unique.
// ID of a task doesn't change if the task is being retried.
func GetTaskID(ctx context.Context) (id string, ok bool) {
	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
	if !ok {
		return "", false
	}
	return metadata.id, true
}

// GetRetryCount extracts retry count from a context, if any.
//
// Return value n indicates the number of times associated task has been
// retried so far.
func GetRetryCount(ctx context.Context) (n int, ok bool) {
	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
	if !ok {
		return 0, false
	}
	return metadata.retryCount, true
}

// GetMaxRetry extracts maximum retry from a context, if any.
//
// Return value n indicates the maximum number of times the associated task
// can be retried if ProcessTask returns a non-nil error.
func GetMaxRetry(ctx context.Context) (n int, ok bool) {
	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
	if !ok {
		return 0, false
	}
	return metadata.maxRetry, true
}

// GetQueueName extracts queue name from a context, if any.
//
// Return value qname indicates which queue the task was pulled from.
func GetQueueName(ctx context.Context) (qname string, ok bool) {
	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
	if !ok {
		return "", false
	}
	return metadata.qname, true
}

207	internal/context/context_test.go	Normal file
@@ -0,0 +1,207 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package context

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/uuid"
	"github.com/hibiken/asynq/internal/base"
)

func TestCreateContextWithFutureDeadline(t *testing.T) {
	tests := []struct {
		deadline time.Time
	}{
		{time.Now().Add(time.Hour)},
	}

	for _, tc := range tests {
		msg := &base.TaskMessage{
			Type:    "something",
			ID:      uuid.NewString(),
			Payload: nil,
		}

		ctx, cancel := New(context.Background(), msg, tc.deadline)
		select {
		case x := <-ctx.Done():
			t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
		default:
		}

		got, ok := ctx.Deadline()
		if !ok {
			t.Errorf("ctx.Deadline() returned false, want deadline to be set")
		}
		if !cmp.Equal(tc.deadline, got) {
			t.Errorf("ctx.Deadline() returned %v, want %v", got, tc.deadline)
		}

		cancel()

		select {
		case <-ctx.Done():
		default:
			t.Errorf("ctx.Done() blocked, want it to be non-blocking")
		}
	}
}

func TestCreateContextWithBaseContext(t *testing.T) {
	type ctxKey string
	type ctxValue string
	var key ctxKey = "key"
	var value ctxValue = "value"

	tests := []struct {
		baseCtx  context.Context
		validate func(ctx context.Context, t *testing.T) error
	}{
		{
			baseCtx: context.WithValue(context.Background(), key, value),
			validate: func(ctx context.Context, t *testing.T) error {
				got, ok := ctx.Value(key).(ctxValue)
				if !ok {
					return fmt.Errorf("ctx.Value().(ctxValue) returned false, expected to be true")
				}
				if want := value; got != want {
					return fmt.Errorf("ctx.Value().(ctxValue) returned unknown value (%v), expected to be %s", got, value)
				}
				return nil
			},
		},
	}

	for _, tc := range tests {
		msg := &base.TaskMessage{
			Type:    "something",
			ID:      uuid.NewString(),
			Payload: nil,
		}

		ctx, cancel := New(tc.baseCtx, msg, time.Now().Add(30*time.Minute))
		defer cancel()

		select {
		case x := <-ctx.Done():
			t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
		default:
		}

		if err := tc.validate(ctx, t); err != nil {
			t.Errorf("%v", err)
		}
	}
}

func TestCreateContextWithPastDeadline(t *testing.T) {
	tests := []struct {
		deadline time.Time
	}{
		{time.Now().Add(-2 * time.Hour)},
	}

	for _, tc := range tests {
		msg := &base.TaskMessage{
			Type:    "something",
			ID:      uuid.NewString(),
			Payload: nil,
		}

		ctx, cancel := New(context.Background(), msg, tc.deadline)
		defer cancel()

		select {
		case <-ctx.Done():
		default:
			t.Errorf("ctx.Done() blocked, want it to be non-blocking")
		}

		got, ok := ctx.Deadline()
		if !ok {
			t.Errorf("ctx.Deadline() returned false, want deadline to be set")
		}
		if !cmp.Equal(tc.deadline, got) {
			t.Errorf("ctx.Deadline() returned %v, want %v", got, tc.deadline)
		}
	}
}

func TestGetTaskMetadataFromContext(t *testing.T) {
	tests := []struct {
		desc string
		msg  *base.TaskMessage
	}{
		{"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}},
		{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}},
		{"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}},
	}

	for _, tc := range tests {
		ctx, cancel := New(context.Background(), tc.msg, time.Now().Add(30*time.Minute))
		defer cancel()

		id, ok := GetTaskID(ctx)
		if !ok {
			t.Errorf("%s: GetTaskID(ctx) returned ok == false", tc.desc)
		}
		if ok && id != tc.msg.ID {
			t.Errorf("%s: GetTaskID(ctx) returned id == %q, want %q", tc.desc, id, tc.msg.ID)
		}

		retried, ok := GetRetryCount(ctx)
		if !ok {
			t.Errorf("%s: GetRetryCount(ctx) returned ok == false", tc.desc)
		}
		if ok && retried != tc.msg.Retried {
			t.Errorf("%s: GetRetryCount(ctx) returned n == %d want %d", tc.desc, retried, tc.msg.Retried)
		}

		maxRetry, ok := GetMaxRetry(ctx)
		if !ok {
			t.Errorf("%s: GetMaxRetry(ctx) returned ok == false", tc.desc)
		}
		if ok && maxRetry != tc.msg.Retry {
			t.Errorf("%s: GetMaxRetry(ctx) returned n == %d want %d", tc.desc, maxRetry, tc.msg.Retry)
		}

		qname, ok := GetQueueName(ctx)
		if !ok {
			t.Errorf("%s: GetQueueName(ctx) returned ok == false", tc.desc)
		}
		if ok && qname != tc.msg.Queue {
			t.Errorf("%s: GetQueueName(ctx) returned qname == %q, want %q", tc.desc, qname, tc.msg.Queue)
		}
	}
}

func TestGetTaskMetadataFromContextError(t *testing.T) {
	tests := []struct {
		desc string
		ctx  context.Context
	}{
		{"with background context", context.Background()},
	}

	for _, tc := range tests {
		if _, ok := GetTaskID(tc.ctx); ok {
			t.Errorf("%s: GetTaskID(ctx) returned ok == true", tc.desc)
		}
		if _, ok := GetRetryCount(tc.ctx); ok {
			t.Errorf("%s: GetRetryCount(ctx) returned ok == true", tc.desc)
		}
		if _, ok := GetMaxRetry(tc.ctx); ok {
			t.Errorf("%s: GetMaxRetry(ctx) returned ok == true", tc.desc)
		}
		if _, ok := GetQueueName(tc.ctx); ok {
			t.Errorf("%s: GetQueueName(ctx) returned ok == true", tc.desc)
		}
	}
}
288	internal/errors/errors.go	Normal file
@@ -0,0 +1,288 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

// Package errors defines the error type and functions used by
// asynq and its internal packages.
package errors

// Note: This package is inspired by a blog post about error handling in project Upspin
// https://commandcenter.blogspot.com/2017/12/error-handling-in-upspin.html.

import (
	"errors"
	"fmt"
	"log"
	"runtime"
	"strings"
)

// Error is the type that implements the error interface.
// It contains a number of fields, each of different type.
// An Error value may leave some values unset.
type Error struct {
	Code Code
	Op   Op
	Err  error
}

func (e *Error) DebugString() string {
	var b strings.Builder
	if e.Op != "" {
		b.WriteString(string(e.Op))
	}
	if e.Code != Unspecified {
		if b.Len() > 0 {
			b.WriteString(": ")
		}
		b.WriteString(e.Code.String())
	}
	if e.Err != nil {
		if b.Len() > 0 {
			b.WriteString(": ")
		}
		b.WriteString(e.Err.Error())
	}
	return b.String()
}

func (e *Error) Error() string {
	var b strings.Builder
	if e.Code != Unspecified {
		b.WriteString(e.Code.String())
	}
	if e.Err != nil {
		if b.Len() > 0 {
			b.WriteString(": ")
		}
		b.WriteString(e.Err.Error())
	}
	return b.String()
}

func (e *Error) Unwrap() error {
	return e.Err
}

// Code defines the canonical error code.
type Code uint8

// List of canonical error codes.
const (
	Unspecified Code = iota
	NotFound
	FailedPrecondition
	Internal
	AlreadyExists
	Unknown
	// Note: If you add a new value here, make sure to update String method.
)

func (c Code) String() string {
	switch c {
	case Unspecified:
		return "ERROR_CODE_UNSPECIFIED"
	case NotFound:
		return "NOT_FOUND"
	case FailedPrecondition:
		return "FAILED_PRECONDITION"
	case Internal:
		return "INTERNAL_ERROR"
	case AlreadyExists:
		return "ALREADY_EXISTS"
	case Unknown:
		return "UNKNOWN"
	}
	panic(fmt.Sprintf("unknown error code %d", c))
}

// Op describes an operation, usually as the package and method,
// such as "rdb.Enqueue".
type Op string

// E builds an error value from its arguments.
// There must be at least one argument or E panics.
// The type of each argument determines its meaning.
// If more than one argument of a given type is presented,
// only the last one is recorded.
//
// The types are:
//	errors.Op
//		The operation being performed, usually the method
//		being invoked (Get, Put, etc.).
//	errors.Code
//		The canonical error code, such as NOT_FOUND.
//	string
//		Treated as an error message and assigned to the
//		Err field after a call to errors.New.
//	error
//		The underlying error that triggered this one.
//
// If the error is printed, only those items that have been
// set to non-zero values will appear in the result.
func E(args ...interface{}) error {
	if len(args) == 0 {
		panic("call to errors.E with no arguments")
	}
	e := &Error{}
	for _, arg := range args {
		switch arg := arg.(type) {
		case Op:
			e.Op = arg
		case Code:
			e.Code = arg
		case error:
			e.Err = arg
		case string:
			e.Err = errors.New(arg)
		default:
			_, file, line, _ := runtime.Caller(1)
			log.Printf("errors.E: bad call from %s:%d: %v", file, line, args)
			return fmt.Errorf("unknown type %T, value %v in error call", arg, arg)
		}
	}
	return e
}

// CanonicalCode returns the canonical code of the given error if one is present.
// Otherwise it returns Unspecified.
func CanonicalCode(err error) Code {
	if err == nil {
		return Unspecified
	}
	e, ok := err.(*Error)
	if !ok {
		return Unspecified
	}
	if e.Code == Unspecified {
		return CanonicalCode(e.Err)
	}
	return e.Code
}
/******************************************
|
||||||
|
Domin Specific Error Types & Values
|
||||||
|
*******************************************/
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNoProcessableTask indicates that there are no tasks ready to be processed.
|
||||||
|
ErrNoProcessableTask = errors.New("no tasks are ready for processing")
|
||||||
|
|
||||||
|
// ErrDuplicateTask indicates that another task with the same unique key holds the uniqueness lock.
|
||||||
|
ErrDuplicateTask = errors.New("task already exists")
|
||||||
|
|
||||||
|
// ErrTaskIdConflict indicates that another task with the same task ID already exist
|
||||||
|
ErrTaskIdConflict = errors.New("task id conflicts with another task")
|
||||||
|
)
|
||||||
|
|
||||||
|
// TaskNotFoundError indicates that a task with the given ID does not exist
|
||||||
|
// in the given queue.
|
||||||
|
type TaskNotFoundError struct {
|
||||||
|
Queue string // queue name
|
||||||
|
ID string // task id
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *TaskNotFoundError) Error() string {
|
||||||
|
return fmt.Sprintf("cannot find task with id=%s in queue %q", e.ID, e.Queue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsTaskNotFound reports whether any error in err's chain is of type TaskNotFoundError.
|
||||||
|
func IsTaskNotFound(err error) bool {
|
||||||
|
var target *TaskNotFoundError
|
||||||
|
return As(err, &target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueueNotFoundError indicates that a queue with the given name does not exist.
|
||||||
|
type QueueNotFoundError struct {
|
||||||
|
Queue string // queue name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *QueueNotFoundError) Error() string {
|
||||||
|
return fmt.Sprintf("queue %q does not exist", e.Queue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsQueueNotFound reports whether any error in err's chain is of type QueueNotFoundError.
|
||||||
|
func IsQueueNotFound(err error) bool {
|
||||||
|
var target *QueueNotFoundError
|
||||||
|
return As(err, &target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueueNotEmptyError indicates that the given queue is not empty.
|
||||||
|
type QueueNotEmptyError struct {
|
||||||
|
Queue string // queue name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *QueueNotEmptyError) Error() string {
|
||||||
|
return fmt.Sprintf("queue %q is not empty", e.Queue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsQueueNotEmpty reports whether any error in err's chain is of type QueueNotEmptyError.
|
||||||
|
func IsQueueNotEmpty(err error) bool {
|
||||||
|
var target *QueueNotEmptyError
|
||||||
|
return As(err, &target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TaskAlreadyArchivedError indicates that the task in question is already archived.
|
||||||
|
type TaskAlreadyArchivedError struct {
|
||||||
|
Queue string // queue name
|
||||||
|
ID string // task id
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *TaskAlreadyArchivedError) Error() string {
|
||||||
|
return fmt.Sprintf("task is already archived: id=%s, queue=%s", e.ID, e.Queue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsTaskAlreadyArchived reports whether any error in err's chain is of type TaskAlreadyArchivedError.
|
||||||
|
func IsTaskAlreadyArchived(err error) bool {
|
||||||
|
var target *TaskAlreadyArchivedError
|
||||||
|
return As(err, &target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedisCommandError indicates that the given redis command returned error.
|
||||||
|
type RedisCommandError struct {
|
||||||
|
Command string // redis command (e.g. LRANGE, ZADD, etc)
|
||||||
|
Err error // underlying error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *RedisCommandError) Error() string {
|
||||||
|
return fmt.Sprintf("redis command error: %s failed: %v", strings.ToUpper(e.Command), e.Err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *RedisCommandError) Unwrap() error { return e.Err }
|
||||||
|
|
||||||
|
// IsRedisCommandError reports whether any error in err's chain is of type RedisCommandError.
|
||||||
|
func IsRedisCommandError(err error) bool {
|
||||||
|
var target *RedisCommandError
|
||||||
|
return As(err, &target)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*************************************************
|
||||||
|
Standard Library errors package functions
|
||||||
|
*************************************************/
|
||||||
|
|
||||||
|
// New returns an error that formats as the given text.
|
||||||
|
// Each call to New returns a distinct error value even if the text is identical.
|
||||||
|
//
|
||||||
|
// This function is the errors.New function from the standard libarary (https://golang.org/pkg/errors/#New).
|
||||||
|
// It is exported from this package for import convinience.
|
||||||
|
func New(text string) error { return errors.New(text) }
|
||||||
|
|
||||||
|
// Is reports whether any error in err's chain matches target.
|
||||||
|
//
|
||||||
|
// This function is the errors.Is function from the standard libarary (https://golang.org/pkg/errors/#Is).
|
||||||
|
// It is exported from this package for import convinience.
|
||||||
|
func Is(err, target error) bool { return errors.Is(err, target) }
|
||||||
|
|
||||||
|
// As finds the first error in err's chain that matches target, and if so, sets target to that error value and returns true.
|
||||||
|
// Otherwise, it returns false.
|
||||||
|
//
|
||||||
|
// This function is the errors.As function from the standard libarary (https://golang.org/pkg/errors/#As).
|
||||||
|
// It is exported from this package for import convinience.
|
||||||
|
func As(err error, target interface{}) bool { return errors.As(err, target) }
|
||||||
|
|
||||||
|
// Unwrap returns the result of calling the Unwrap method on err, if err's type contains an Unwrap method returning error.
|
||||||
|
// Otherwise, Unwrap returns nil.
|
||||||
|
//
|
||||||
|
// This function is the errors.Unwrap function from the standard libarary (https://golang.org/pkg/errors/#Unwrap).
|
||||||
|
// It is exported from this package for import convinience.
|
||||||
|
func Unwrap(err error) error { return errors.Unwrap(err) }
|
||||||
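To see how these pieces compose, here is a minimal usage sketch. The function, queue, and task names are hypothetical, and since the package is internal it is only importable from inside the asynq module:

package rdb

import (
    "github.com/hibiken/asynq/internal/errors"
)

// archiveTask sketches the intended call pattern: annotate a typed domain
// error with the operation name and a canonical code at the failure site.
// (Hypothetical function; not part of the diff above.)
func archiveTask(qname, id string) error {
    var op errors.Op = "rdb.ArchiveTask"
    // Pretend the Redis lookup came back empty.
    return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id})
}

func handle() {
    err := archiveTask("default", "123")
    if errors.IsTaskNotFound(err) {
        // React to the concrete error type anywhere in the chain.
    }
    code := errors.CanonicalCode(err) // NotFound
    _ = code
}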
176
internal/errors/errors_test.go
Normal file
@@ -0,0 +1,176 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package errors

import "testing"

func TestErrorDebugString(t *testing.T) {
    // DebugString should include Op since it's meant to be used by
    // maintainers/contributors of the asynq package.
    tests := []struct {
        desc string
        err  error
        want string
    }{
        {
            desc: "With Op, Code, and string",
            err:  E(Op("rdb.DeleteTask"), NotFound, "cannot find task with id=123"),
            want: "rdb.DeleteTask: NOT_FOUND: cannot find task with id=123",
        },
        {
            desc: "With Op, Code and error",
            err:  E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}),
            want: `rdb.DeleteTask: NOT_FOUND: cannot find task with id=123 in queue "default"`,
        },
    }

    for _, tc := range tests {
        if got := tc.err.(*Error).DebugString(); got != tc.want {
            t.Errorf("%s: got=%q, want=%q", tc.desc, got, tc.want)
        }
    }
}

func TestErrorString(t *testing.T) {
    // The Error method should omit Op since op is an internal detail
    // and we don't want to provide it to users of the package.
    tests := []struct {
        desc string
        err  error
        want string
    }{
        {
            desc: "With Op, Code, and string",
            err:  E(Op("rdb.DeleteTask"), NotFound, "cannot find task with id=123"),
            want: "NOT_FOUND: cannot find task with id=123",
        },
        {
            desc: "With Op, Code and error",
            err:  E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}),
            want: `NOT_FOUND: cannot find task with id=123 in queue "default"`,
        },
    }

    for _, tc := range tests {
        if got := tc.err.Error(); got != tc.want {
            t.Errorf("%s: got=%q, want=%q", tc.desc, got, tc.want)
        }
    }
}

func TestErrorIs(t *testing.T) {
    var ErrCustom = New("custom sentinel error")

    tests := []struct {
        desc   string
        err    error
        target error
        want   bool
    }{
        {
            desc:   "should unwrap one level",
            err:    E(Op("rdb.DeleteTask"), ErrCustom),
            target: ErrCustom,
            want:   true,
        },
    }

    for _, tc := range tests {
        if got := Is(tc.err, tc.target); got != tc.want {
            t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want)
        }
    }
}

func TestErrorAs(t *testing.T) {
    tests := []struct {
        desc   string
        err    error
        target interface{}
        want   bool
    }{
        {
            desc:   "should unwrap one level",
            err:    E(Op("rdb.DeleteTask"), NotFound, &QueueNotFoundError{Queue: "email"}),
            target: &QueueNotFoundError{},
            want:   true,
        },
    }

    for _, tc := range tests {
        if got := As(tc.err, &tc.target); got != tc.want {
            t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want)
        }
    }
}

func TestErrorPredicates(t *testing.T) {
    tests := []struct {
        desc string
        fn   func(err error) bool
        err  error
        want bool
    }{
        {
            desc: "IsTaskNotFound should detect presence of TaskNotFoundError in err's chain",
            fn:   IsTaskNotFound,
            err:  E(Op("rdb.ArchiveTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "9876"}),
            want: true,
        },
        {
            desc: "IsTaskNotFound should detect absence of TaskNotFoundError in err's chain",
            fn:   IsTaskNotFound,
            err:  E(Op("rdb.ArchiveTask"), NotFound, &QueueNotFoundError{Queue: "default"}),
            want: false,
        },
        {
            desc: "IsQueueNotFound should detect presence of QueueNotFoundError in err's chain",
            fn:   IsQueueNotFound,
            err:  E(Op("rdb.ArchiveTask"), NotFound, &QueueNotFoundError{Queue: "default"}),
            want: true,
        },
    }

    for _, tc := range tests {
        if got := tc.fn(tc.err); got != tc.want {
            t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want)
        }
    }
}

func TestCanonicalCode(t *testing.T) {
    tests := []struct {
        desc string
        err  error
        want Code
    }{
        {
            desc: "without nesting",
            err:  E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}),
            want: NotFound,
        },
        {
            desc: "with nesting",
            err:  E(FailedPrecondition, E(NotFound)),
            want: FailedPrecondition,
        },
        {
            desc: "returns Unspecified if err is not *Error",
            err:  New("some other error"),
            want: Unspecified,
        },
        {
            desc: "returns Unspecified if err is nil",
            err:  nil,
            want: Unspecified,
        },
    }

    for _, tc := range tests {
        if got := CanonicalCode(tc.err); got != tc.want {
            t.Errorf("%s: got=%s, want=%s", tc.desc, got, tc.want)
        }
    }
}
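A minimal fragment contrasting the two renderings that these tests exercise (assumed to run inside this package, with fmt imported; the values are illustrative and taken from the test expectations above):

err := E(Op("rdb.DeleteTask"), NotFound, "cannot find task with id=123")
fmt.Println(err.Error())
// NOT_FOUND: cannot find task with id=123  (user-facing; Op omitted)
fmt.Println(err.(*Error).DebugString())
// rdb.DeleteTask: NOT_FOUND: cannot find task with id=123  (for maintainers)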
838
internal/proto/asynq.pb.go
Normal file
@@ -0,0 +1,838 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
//   protoc-gen-go v1.25.0
//   protoc        v3.17.3
// source: asynq.proto

package proto

import (
    proto "github.com/golang/protobuf/proto"
    protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    timestamppb "google.golang.org/protobuf/types/known/timestamppb"
    reflect "reflect"
    sync "sync"
)

const (
    // Verify that this generated code is sufficiently up-to-date.
    _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    // Verify that runtime/protoimpl is sufficiently up-to-date.
    _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4

// TaskMessage is the internal representation of a task with additional
// metadata fields.
type TaskMessage struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    // Type indicates the kind of the task to be performed.
    Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
    // Payload holds data needed to process the task.
    Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
    // Unique identifier for the task.
    Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"`
    // Name of the queue to which this task belongs.
    Queue string `protobuf:"bytes,4,opt,name=queue,proto3" json:"queue,omitempty"`
    // Max number of retries for this task.
    Retry int32 `protobuf:"varint,5,opt,name=retry,proto3" json:"retry,omitempty"`
    // Number of times this task has been retried so far.
    Retried int32 `protobuf:"varint,6,opt,name=retried,proto3" json:"retried,omitempty"`
    // Error message from the last failure.
    ErrorMsg string `protobuf:"bytes,7,opt,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"`
    // Time of last failure in Unix time,
    // the number of seconds elapsed since January 1, 1970 UTC.
    // Use zero to indicate no last failure.
    LastFailedAt int64 `protobuf:"varint,11,opt,name=last_failed_at,json=lastFailedAt,proto3" json:"last_failed_at,omitempty"`
    // Timeout specifies timeout in seconds.
    // Use zero to indicate no timeout.
    Timeout int64 `protobuf:"varint,8,opt,name=timeout,proto3" json:"timeout,omitempty"`
    // Deadline specifies the deadline for the task in Unix time,
    // the number of seconds elapsed since January 1, 1970 UTC.
    // Use zero to indicate no deadline.
    Deadline int64 `protobuf:"varint,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
    // UniqueKey holds the redis key used for uniqueness lock for this task.
    // Empty string indicates that no uniqueness lock was used.
    UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"`
    // Retention period specified in a number of seconds.
    // The task will be stored in redis as a completed task until the TTL
    // expires.
    Retention int64 `protobuf:"varint,12,opt,name=retention,proto3" json:"retention,omitempty"`
    // Time when the task completed successfully, in Unix time,
    // the number of seconds elapsed since January 1, 1970 UTC.
    // This field is populated if result_ttl > 0 upon completion.
    CompletedAt int64 `protobuf:"varint,13,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"`
}

func (x *TaskMessage) Reset() {
    *x = TaskMessage{}
    if protoimpl.UnsafeEnabled {
        mi := &file_asynq_proto_msgTypes[0]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *TaskMessage) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*TaskMessage) ProtoMessage() {}

func (x *TaskMessage) ProtoReflect() protoreflect.Message {
    mi := &file_asynq_proto_msgTypes[0]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use TaskMessage.ProtoReflect.Descriptor instead.
func (*TaskMessage) Descriptor() ([]byte, []int) {
    return file_asynq_proto_rawDescGZIP(), []int{0}
}

func (x *TaskMessage) GetType() string {
    if x != nil {
        return x.Type
    }
    return ""
}

func (x *TaskMessage) GetPayload() []byte {
    if x != nil {
        return x.Payload
    }
    return nil
}

func (x *TaskMessage) GetId() string {
    if x != nil {
        return x.Id
    }
    return ""
}

func (x *TaskMessage) GetQueue() string {
    if x != nil {
        return x.Queue
    }
    return ""
}

func (x *TaskMessage) GetRetry() int32 {
    if x != nil {
        return x.Retry
    }
    return 0
}

func (x *TaskMessage) GetRetried() int32 {
    if x != nil {
        return x.Retried
    }
    return 0
}

func (x *TaskMessage) GetErrorMsg() string {
    if x != nil {
        return x.ErrorMsg
    }
    return ""
}

func (x *TaskMessage) GetLastFailedAt() int64 {
    if x != nil {
        return x.LastFailedAt
    }
    return 0
}

func (x *TaskMessage) GetTimeout() int64 {
    if x != nil {
        return x.Timeout
    }
    return 0
}

func (x *TaskMessage) GetDeadline() int64 {
    if x != nil {
        return x.Deadline
    }
    return 0
}

func (x *TaskMessage) GetUniqueKey() string {
    if x != nil {
        return x.UniqueKey
    }
    return ""
}

func (x *TaskMessage) GetRetention() int64 {
    if x != nil {
        return x.Retention
    }
    return 0
}

func (x *TaskMessage) GetCompletedAt() int64 {
    if x != nil {
        return x.CompletedAt
    }
    return 0
}

// ServerInfo holds information about a running server.
type ServerInfo struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    // Host machine the server is running on.
    Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
    // PID of the server process.
    Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
    // Unique identifier for this server.
    ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
    // Maximum concurrency this server will use.
    Concurrency int32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
    // List of queue names with their priorities.
    // The server will consume tasks from the queues and prioritize
    // queues with higher priority numbers.
    Queues map[string]int32 `protobuf:"bytes,5,rep,name=queues,proto3" json:"queues,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
    // If set, the server will always consume tasks from a queue with higher
    // priority.
    StrictPriority bool `protobuf:"varint,6,opt,name=strict_priority,json=strictPriority,proto3" json:"strict_priority,omitempty"`
    // Status indicates the status of the server.
    Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
    // Time this server was started.
    StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
    // Number of workers currently processing tasks.
    ActiveWorkerCount int32 `protobuf:"varint,9,opt,name=active_worker_count,json=activeWorkerCount,proto3" json:"active_worker_count,omitempty"`
}

func (x *ServerInfo) Reset() {
    *x = ServerInfo{}
    if protoimpl.UnsafeEnabled {
        mi := &file_asynq_proto_msgTypes[1]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *ServerInfo) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*ServerInfo) ProtoMessage() {}

func (x *ServerInfo) ProtoReflect() protoreflect.Message {
    mi := &file_asynq_proto_msgTypes[1]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use ServerInfo.ProtoReflect.Descriptor instead.
func (*ServerInfo) Descriptor() ([]byte, []int) {
    return file_asynq_proto_rawDescGZIP(), []int{1}
}

func (x *ServerInfo) GetHost() string {
    if x != nil {
        return x.Host
    }
    return ""
}

func (x *ServerInfo) GetPid() int32 {
    if x != nil {
        return x.Pid
    }
    return 0
}

func (x *ServerInfo) GetServerId() string {
    if x != nil {
        return x.ServerId
    }
    return ""
}

func (x *ServerInfo) GetConcurrency() int32 {
    if x != nil {
        return x.Concurrency
    }
    return 0
}

func (x *ServerInfo) GetQueues() map[string]int32 {
    if x != nil {
        return x.Queues
    }
    return nil
}

func (x *ServerInfo) GetStrictPriority() bool {
    if x != nil {
        return x.StrictPriority
    }
    return false
}

func (x *ServerInfo) GetStatus() string {
    if x != nil {
        return x.Status
    }
    return ""
}

func (x *ServerInfo) GetStartTime() *timestamppb.Timestamp {
    if x != nil {
        return x.StartTime
    }
    return nil
}

func (x *ServerInfo) GetActiveWorkerCount() int32 {
    if x != nil {
        return x.ActiveWorkerCount
    }
    return 0
}

// WorkerInfo holds information about a running worker.
type WorkerInfo struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    // Host machine this worker is running on.
    Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
    // PID of the process in which this worker is running.
    Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
    // ID of the server in which this worker is running.
    ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
    // ID of the task this worker is processing.
    TaskId string `protobuf:"bytes,4,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
    // Type of the task this worker is processing.
    TaskType string `protobuf:"bytes,5,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
    // Payload of the task this worker is processing.
    TaskPayload []byte `protobuf:"bytes,6,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
    // Name of the queue to which the task the worker is processing belongs.
    Queue string `protobuf:"bytes,7,opt,name=queue,proto3" json:"queue,omitempty"`
    // Time this worker started processing the task.
    StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
    // Deadline by which the worker needs to complete processing
    // the task. If the worker exceeds the deadline, the task will fail.
    Deadline *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
}

func (x *WorkerInfo) Reset() {
    *x = WorkerInfo{}
    if protoimpl.UnsafeEnabled {
        mi := &file_asynq_proto_msgTypes[2]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *WorkerInfo) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*WorkerInfo) ProtoMessage() {}

func (x *WorkerInfo) ProtoReflect() protoreflect.Message {
    mi := &file_asynq_proto_msgTypes[2]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use WorkerInfo.ProtoReflect.Descriptor instead.
func (*WorkerInfo) Descriptor() ([]byte, []int) {
    return file_asynq_proto_rawDescGZIP(), []int{2}
}

func (x *WorkerInfo) GetHost() string {
    if x != nil {
        return x.Host
    }
    return ""
}

func (x *WorkerInfo) GetPid() int32 {
    if x != nil {
        return x.Pid
    }
    return 0
}

func (x *WorkerInfo) GetServerId() string {
    if x != nil {
        return x.ServerId
    }
    return ""
}

func (x *WorkerInfo) GetTaskId() string {
    if x != nil {
        return x.TaskId
    }
    return ""
}

func (x *WorkerInfo) GetTaskType() string {
    if x != nil {
        return x.TaskType
    }
    return ""
}

func (x *WorkerInfo) GetTaskPayload() []byte {
    if x != nil {
        return x.TaskPayload
    }
    return nil
}

func (x *WorkerInfo) GetQueue() string {
    if x != nil {
        return x.Queue
    }
    return ""
}

func (x *WorkerInfo) GetStartTime() *timestamppb.Timestamp {
    if x != nil {
        return x.StartTime
    }
    return nil
}

func (x *WorkerInfo) GetDeadline() *timestamppb.Timestamp {
    if x != nil {
        return x.Deadline
    }
    return nil
}

// SchedulerEntry holds information about a periodic task registered
// with a scheduler.
type SchedulerEntry struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    // Identifier of the scheduler entry.
    Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
    // Periodic schedule spec of the entry.
    Spec string `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"`
    // Task type of the periodic task.
    TaskType string `protobuf:"bytes,3,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
    // Task payload of the periodic task.
    TaskPayload []byte `protobuf:"bytes,4,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
    // Options used to enqueue the periodic task.
    EnqueueOptions []string `protobuf:"bytes,5,rep,name=enqueue_options,json=enqueueOptions,proto3" json:"enqueue_options,omitempty"`
    // Next time the task will be enqueued.
    NextEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=next_enqueue_time,json=nextEnqueueTime,proto3" json:"next_enqueue_time,omitempty"`
    // Last time the task was enqueued.
    // Zero time if task was never enqueued.
    PrevEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=prev_enqueue_time,json=prevEnqueueTime,proto3" json:"prev_enqueue_time,omitempty"`
}

func (x *SchedulerEntry) Reset() {
    *x = SchedulerEntry{}
    if protoimpl.UnsafeEnabled {
        mi := &file_asynq_proto_msgTypes[3]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *SchedulerEntry) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*SchedulerEntry) ProtoMessage() {}

func (x *SchedulerEntry) ProtoReflect() protoreflect.Message {
    mi := &file_asynq_proto_msgTypes[3]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use SchedulerEntry.ProtoReflect.Descriptor instead.
func (*SchedulerEntry) Descriptor() ([]byte, []int) {
    return file_asynq_proto_rawDescGZIP(), []int{3}
}

func (x *SchedulerEntry) GetId() string {
    if x != nil {
        return x.Id
    }
    return ""
}

func (x *SchedulerEntry) GetSpec() string {
    if x != nil {
        return x.Spec
    }
    return ""
}

func (x *SchedulerEntry) GetTaskType() string {
    if x != nil {
        return x.TaskType
    }
    return ""
}

func (x *SchedulerEntry) GetTaskPayload() []byte {
    if x != nil {
        return x.TaskPayload
    }
    return nil
}

func (x *SchedulerEntry) GetEnqueueOptions() []string {
    if x != nil {
        return x.EnqueueOptions
    }
    return nil
}

func (x *SchedulerEntry) GetNextEnqueueTime() *timestamppb.Timestamp {
    if x != nil {
        return x.NextEnqueueTime
    }
    return nil
}

func (x *SchedulerEntry) GetPrevEnqueueTime() *timestamppb.Timestamp {
    if x != nil {
        return x.PrevEnqueueTime
    }
    return nil
}

// SchedulerEnqueueEvent holds information about an enqueue event
// by a scheduler.
type SchedulerEnqueueEvent struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    // ID of the task that was enqueued.
    TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
    // Time the task was enqueued.
    EnqueueTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=enqueue_time,json=enqueueTime,proto3" json:"enqueue_time,omitempty"`
}

func (x *SchedulerEnqueueEvent) Reset() {
    *x = SchedulerEnqueueEvent{}
    if protoimpl.UnsafeEnabled {
        mi := &file_asynq_proto_msgTypes[4]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *SchedulerEnqueueEvent) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*SchedulerEnqueueEvent) ProtoMessage() {}

func (x *SchedulerEnqueueEvent) ProtoReflect() protoreflect.Message {
    mi := &file_asynq_proto_msgTypes[4]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use SchedulerEnqueueEvent.ProtoReflect.Descriptor instead.
func (*SchedulerEnqueueEvent) Descriptor() ([]byte, []int) {
    return file_asynq_proto_rawDescGZIP(), []int{4}
}

func (x *SchedulerEnqueueEvent) GetTaskId() string {
    if x != nil {
        return x.TaskId
    }
    return ""
}

func (x *SchedulerEnqueueEvent) GetEnqueueTime() *timestamppb.Timestamp {
    if x != nil {
        return x.EnqueueTime
    }
    return nil
}

var File_asynq_proto protoreflect.FileDescriptor

var file_asynq_proto_rawDesc = []byte{
    0x0a, 0x0b, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x61,
    0x73, 0x79, 0x6e, 0x71, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
    0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
    0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xea, 0x02, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65,
    0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
    0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79,
    0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c,
    0x6f, 0x61, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
    0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
    0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x74,
    0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x72, 0x65, 0x74, 0x72, 0x79, 0x12,
    0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05,
    0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72,
    0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72,
    0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66,
    0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c,
    0x6c, 0x61, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07,
    0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74,
    0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69,
    0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69,
    0x6e, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79,
    0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65,
    0x79, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0c,
    0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12,
    0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18,
    0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64,
    0x41, 0x74, 0x22, 0x8f, 0x03, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66,
    0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
    0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
    0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65,
    0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76,
    0x65, 0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
    0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
    0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x35, 0x0a, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73,
    0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x53,
    0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73,
    0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x12, 0x27, 0x0a,
    0x0f, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79,
    0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x50, 0x72,
    0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
    0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39,
    0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
    0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
    0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74,
    0x69, 0x76, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
    0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x57, 0x6f,
    0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x51, 0x75, 0x65,
    0x75, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
    0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
    0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
    0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49,
    0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
    0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02,
    0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72,
    0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65,
    0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69,
    0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12,
    0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01,
    0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c,
    0x74, 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01,
    0x28, 0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12,
    0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
    0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74,
    0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
    0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65,
    0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01,
    0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08,
    0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0e, 0x53, 0x63, 0x68,
    0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69,
    0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73,
    0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12,
    0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
    0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c,
    0x74, 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01,
    0x28, 0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12,
    0x27, 0x0a, 0x0f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
    0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75,
    0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x11, 0x6e, 0x65, 0x78, 0x74,
    0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20,
    0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
    0x0f, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65,
    0x12, 0x46, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65,
    0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
    0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
    0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x76, 0x45, 0x6e, 0x71,
    0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x6f, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65,
    0x64, 0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x45, 0x76, 0x65, 0x6e,
    0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
    0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x6e,
    0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
    0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x65, 0x6e,
    0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74,
    0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x69, 0x62, 0x69, 0x6b, 0x65, 0x6e, 0x2f,
    0x61, 0x73, 0x79, 0x6e, 0x71, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70,
    0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
    file_asynq_proto_rawDescOnce sync.Once
    file_asynq_proto_rawDescData = file_asynq_proto_rawDesc
)

func file_asynq_proto_rawDescGZIP() []byte {
    file_asynq_proto_rawDescOnce.Do(func() {
        file_asynq_proto_rawDescData = protoimpl.X.CompressGZIP(file_asynq_proto_rawDescData)
    })
    return file_asynq_proto_rawDescData
}

var file_asynq_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_asynq_proto_goTypes = []interface{}{
    (*TaskMessage)(nil),           // 0: asynq.TaskMessage
    (*ServerInfo)(nil),            // 1: asynq.ServerInfo
    (*WorkerInfo)(nil),            // 2: asynq.WorkerInfo
    (*SchedulerEntry)(nil),        // 3: asynq.SchedulerEntry
    (*SchedulerEnqueueEvent)(nil), // 4: asynq.SchedulerEnqueueEvent
    nil,                           // 5: asynq.ServerInfo.QueuesEntry
    (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
}
var file_asynq_proto_depIdxs = []int32{
    5, // 0: asynq.ServerInfo.queues:type_name -> asynq.ServerInfo.QueuesEntry
    6, // 1: asynq.ServerInfo.start_time:type_name -> google.protobuf.Timestamp
    6, // 2: asynq.WorkerInfo.start_time:type_name -> google.protobuf.Timestamp
    6, // 3: asynq.WorkerInfo.deadline:type_name -> google.protobuf.Timestamp
    6, // 4: asynq.SchedulerEntry.next_enqueue_time:type_name -> google.protobuf.Timestamp
    6, // 5: asynq.SchedulerEntry.prev_enqueue_time:type_name -> google.protobuf.Timestamp
    6, // 6: asynq.SchedulerEnqueueEvent.enqueue_time:type_name -> google.protobuf.Timestamp
    7, // [7:7] is the sub-list for method output_type
    7, // [7:7] is the sub-list for method input_type
    7, // [7:7] is the sub-list for extension type_name
    7, // [7:7] is the sub-list for extension extendee
    0, // [0:7] is the sub-list for field type_name
}

func init() { file_asynq_proto_init() }
func file_asynq_proto_init() {
    if File_asynq_proto != nil {
        return
    }
    if !protoimpl.UnsafeEnabled {
        file_asynq_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*TaskMessage); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_asynq_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*ServerInfo); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_asynq_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*WorkerInfo); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_asynq_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*SchedulerEntry); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_asynq_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*SchedulerEnqueueEvent); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
    }
    type x struct{}
    out := protoimpl.TypeBuilder{
        File: protoimpl.DescBuilder{
            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
            RawDescriptor: file_asynq_proto_rawDesc,
            NumEnums:      0,
            NumMessages:   6,
            NumExtensions: 0,
            NumServices:   0,
        },
        GoTypes:           file_asynq_proto_goTypes,
        DependencyIndexes: file_asynq_proto_depIdxs,
        MessageInfos:      file_asynq_proto_msgTypes,
    }.Build()
    File_asynq_proto = out.File
    file_asynq_proto_rawDesc = nil
    file_asynq_proto_goTypes = nil
    file_asynq_proto_depIdxs = nil
}
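As a rough round-trip sketch for the generated types above: the snippet below marshals a TaskMessage and decodes it back. The field values are made up, the package is internal so the snippet assumes it lives inside the asynq module, and proto.Marshal/proto.Unmarshal from google.golang.org/protobuf/proto also accept messages generated by this version of protoc-gen-go:

package main

import (
    "fmt"
    "log"

    pb "github.com/hibiken/asynq/internal/proto"
    "google.golang.org/protobuf/proto"
)

func main() {
    msg := &pb.TaskMessage{
        Type:    "email:welcome",           // kind of task (illustrative)
        Payload: []byte(`{"user_id": 42}`), // opaque payload bytes
        Id:      "d9b2d63d-a233-4123-847a-0d6fb92d9271",
        Queue:   "default",
        Retry:   25,
        Timeout: 1800, // seconds; zero would mean no timeout
    }
    // Encode to the protobuf wire format.
    data, err := proto.Marshal(msg)
    if err != nil {
        log.Fatal(err)
    }
    // Decode back into a fresh message.
    var decoded pb.TaskMessage
    if err := proto.Unmarshal(data, &decoded); err != nil {
        log.Fatal(err)
    }
    fmt.Println(decoded.GetQueue(), decoded.GetRetry()) // default 25
}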
163
internal/proto/asynq.proto
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
package asynq;
|
||||||
|
|
||||||
|
import "google/protobuf/timestamp.proto";
|
||||||
|
|
||||||
|
option go_package = "github.com/hibiken/asynq/internal/proto";
|
||||||
|
|
||||||
|
// TaskMessage is the internal representation of a task with additional
|
||||||
|
// metadata fields.
|
||||||
|
message TaskMessage {
|
||||||
|
// Type indicates the kind of the task to be performed.
|
||||||
|
string type = 1;
|
||||||
|
|
||||||
|
// Payload holds data needed to process the task.
|
||||||
|
bytes payload = 2;
|
||||||
|
|
||||||
|
// Unique identifier for the task.
|
||||||
|
string id = 3;
|
||||||
|
|
||||||
|
// Name of the queue to which this task belongs.
|
||||||
|
string queue = 4;
|
||||||
|
|
||||||
|
// Max number of retries for this task.
|
||||||
|
int32 retry = 5;
|
||||||
|
|
||||||
|
// Number of times this task has been retried so far.
|
||||||
|
int32 retried = 6;
|
||||||
|
|
||||||
|
// Error message from the last failure.
|
||||||
|
string error_msg = 7;
|
||||||
|
|
||||||
|
// Time of last failure in Unix time,
|
||||||
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
|
// Use zero to indicate no last failure.
|
||||||
|
int64 last_failed_at = 11;
|
||||||
|
|
||||||
|
// Timeout specifies timeout in seconds.
|
||||||
|
// Use zero to indicate no timeout.
|
||||||
|
int64 timeout = 8;
|
||||||
|
|
||||||
|
// Deadline specifies the deadline for the task in Unix time,
|
||||||
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
|
// Use zero to indicate no deadline.
|
||||||
|
int64 deadline = 9;
|
||||||
|
|
||||||
|
// UniqueKey holds the redis key used for uniqueness lock for this task.
|
||||||
|
// Empty string indicates that no uniqueness lock was used.
|
||||||
|
string unique_key = 10;
|
||||||
|
|
||||||
|
// Retention period specified in a number of seconds.
|
||||||
|
// The task will be stored in redis as a completed task until the TTL
|
||||||
|
// expires.
|
||||||
|
int64 retention = 12;
|
||||||
|
|
||||||
|
// Time when the task completed in success in Unix time,
|
||||||
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
|
// This field is populated if result_ttl > 0 upon completion.
|
||||||
|
int64 completed_at = 13;
|
||||||
|
};
|
||||||

// ServerInfo holds information about a running server.
message ServerInfo {
  // Host machine the server is running on.
  string host = 1;

  // PID of the server process.
  int32 pid = 2;

  // Unique identifier for this server.
  string server_id = 3;

  // Maximum number of concurrency this server will use.
  int32 concurrency = 4;

  // List of queue names with their priorities.
  // The server will consume tasks from the queues and prioritize
  // queues with higher priority numbers.
  map<string, int32> queues = 5;

  // If set, the server will always consume tasks from a queue with higher
  // priority.
  bool strict_priority = 6;

  // Status indicates the status of the server.
  string status = 7;

  // Time this server was started.
  google.protobuf.Timestamp start_time = 8;

  // Number of workers currently processing tasks.
  int32 active_worker_count = 9;
};

// WorkerInfo holds information about a running worker.
message WorkerInfo {
  // Host machine this worker is running on.
  string host = 1;

  // PID of the process in which this worker is running.
  int32 pid = 2;

  // ID of the server in which this worker is running.
  string server_id = 3;

  // ID of the task this worker is processing.
  string task_id = 4;

  // Type of the task this worker is processing.
  string task_type = 5;

  // Payload of the task this worker is processing.
  bytes task_payload = 6;

  // Name of the queue to which the task the worker is processing belongs.
  string queue = 7;

  // Time this worker started processing the task.
  google.protobuf.Timestamp start_time = 8;

  // Deadline by which the worker needs to complete processing
  // the task. If the worker exceeds the deadline, the task will fail.
  google.protobuf.Timestamp deadline = 9;
};

// SchedulerEntry holds information about a periodic task registered
// with a scheduler.
message SchedulerEntry {
  // Identifier of the scheduler entry.
  string id = 1;

  // Periodic schedule spec of the entry.
  string spec = 2;

  // Task type of the periodic task.
  string task_type = 3;

  // Task payload of the periodic task.
  bytes task_payload = 4;

  // Options used to enqueue the periodic task.
  repeated string enqueue_options = 5;

  // Next time the task will be enqueued.
  google.protobuf.Timestamp next_enqueue_time = 6;

  // Last time the task was enqueued.
  // Zero time if task was never enqueued.
  google.protobuf.Timestamp prev_enqueue_time = 7;
};

// SchedulerEnqueueEvent holds information about an enqueue event
// by a scheduler.
message SchedulerEnqueueEvent {
  // ID of the task that was enqueued.
  string task_id = 1;

  // Time the task was enqueued.
  google.protobuf.Timestamp enqueue_time = 2;
};
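The queues map and strict_priority flag recorded in ServerInfo mirror the server configuration exposed by the public API. A minimal sketch of a server configured with weighted queues; the weights and queue names are illustrative:

package main

import "github.com/hibiken/asynq"

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		asynq.Config{
			Concurrency: 10,
			Queues: map[string]int{
				"critical": 6, // higher number = higher priority
				"default":  3,
				"low":      1,
			},
			// With StrictPriority set, "critical" is always drained
			// before "default", and "default" before "low".
			StrictPriority: true,
		},
	)
	_ = srv // a real program would call srv.Run(mux)
}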
@@ -5,37 +5,273 @@
 package rdb
 
 import (
+	"context"
+	"fmt"
 	"testing"
+	"time"
 
-	"github.com/go-redis/redis/v7"
-	h "github.com/hibiken/asynq/internal/asynqtest"
+	"github.com/hibiken/asynq/internal/asynqtest"
 	"github.com/hibiken/asynq/internal/base"
 )
 
-func BenchmarkDone(b *testing.B) {
-	r := redis.NewClient(&redis.Options{
-		Addr: "localhost:6379",
-		DB:   8,
-	})
-	h.FlushDB(b, r)
-
-	// populate in-progress queue with messages
-	var inProgress []*base.TaskMessage
-	for i := 0; i < 40; i++ {
-		inProgress = append(inProgress,
-			h.NewTaskMessage("send_email", map[string]interface{}{"subject": "hello", "recipient_id": 123}))
-	}
-	h.SeedInProgressQueue(b, r, inProgress)
-
-	rdb := NewRDB(r)
-
-	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
-		b.StopTimer()
-		msg := h.NewTaskMessage("reindex", map[string]interface{}{"config": "path/to/config/file"})
-		r.LPush(base.InProgressQueue, h.MustMarshal(b, msg))
-		b.StartTimer()
-
-		rdb.Done(msg)
-	}
-}
+func BenchmarkEnqueue(b *testing.B) {
+	r := setup(b)
+	ctx := context.Background()
+	msg := asynqtest.NewTaskMessage("task1", nil)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		b.StartTimer()
+
+		if err := r.Enqueue(ctx, msg); err != nil {
+			b.Fatalf("Enqueue failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkEnqueueUnique(b *testing.B) {
+	r := setup(b)
+	ctx := context.Background()
+	msg := &base.TaskMessage{
+		Type:      "task1",
+		Payload:   nil,
+		Queue:     base.DefaultQueueName,
+		UniqueKey: base.UniqueKey("default", "task1", nil),
+	}
+	uniqueTTL := 5 * time.Minute
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		b.StartTimer()
+
+		if err := r.EnqueueUnique(ctx, msg, uniqueTTL); err != nil {
+			b.Fatalf("EnqueueUnique failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkSchedule(b *testing.B) {
+	r := setup(b)
+	ctx := context.Background()
+	msg := asynqtest.NewTaskMessage("task1", nil)
+	processAt := time.Now().Add(3 * time.Minute)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		b.StartTimer()
+
+		if err := r.Schedule(ctx, msg, processAt); err != nil {
+			b.Fatalf("Schedule failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkScheduleUnique(b *testing.B) {
+	r := setup(b)
+	ctx := context.Background()
+	msg := &base.TaskMessage{
+		Type:      "task1",
+		Payload:   nil,
+		Queue:     base.DefaultQueueName,
+		UniqueKey: base.UniqueKey("default", "task1", nil),
+	}
+	processAt := time.Now().Add(3 * time.Minute)
+	uniqueTTL := 5 * time.Minute
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		b.StartTimer()
+
+		if err := r.ScheduleUnique(ctx, msg, processAt, uniqueTTL); err != nil {
+			b.Fatalf("ScheduleUnique failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkDequeueSingleQueue(b *testing.B) {
+	r := setup(b)
+	ctx := context.Background()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		for i := 0; i < 10; i++ {
+			m := asynqtest.NewTaskMessageWithQueue(
+				fmt.Sprintf("task%d", i), nil, base.DefaultQueueName)
+			if err := r.Enqueue(ctx, m); err != nil {
+				b.Fatalf("Enqueue failed: %v", err)
+			}
+		}
+		b.StartTimer()
+
+		if _, _, err := r.Dequeue(base.DefaultQueueName); err != nil {
+			b.Fatalf("Dequeue failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkDequeueMultipleQueues(b *testing.B) {
+	qnames := []string{"critical", "default", "low"}
+	r := setup(b)
+	ctx := context.Background()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		for i := 0; i < 10; i++ {
+			for _, qname := range qnames {
+				m := asynqtest.NewTaskMessageWithQueue(
+					fmt.Sprintf("%s_task%d", qname, i), nil, qname)
+				if err := r.Enqueue(ctx, m); err != nil {
+					b.Fatalf("Enqueue failed: %v", err)
+				}
+			}
+		}
+		b.StartTimer()
+
+		if _, _, err := r.Dequeue(qnames...); err != nil {
+			b.Fatalf("Dequeue failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkDone(b *testing.B) {
+	r := setup(b)
+	m1 := asynqtest.NewTaskMessage("task1", nil)
+	m2 := asynqtest.NewTaskMessage("task2", nil)
+	m3 := asynqtest.NewTaskMessage("task3", nil)
+	msgs := []*base.TaskMessage{m1, m2, m3}
+	zs := []base.Z{
+		{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
+		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
+		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
+	}
+	ctx := context.Background()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
+		b.StartTimer()
+
+		if err := r.Done(ctx, msgs[0]); err != nil {
+			b.Fatalf("Done failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkRetry(b *testing.B) {
+	r := setup(b)
+	m1 := asynqtest.NewTaskMessage("task1", nil)
+	m2 := asynqtest.NewTaskMessage("task2", nil)
+	m3 := asynqtest.NewTaskMessage("task3", nil)
+	msgs := []*base.TaskMessage{m1, m2, m3}
+	zs := []base.Z{
+		{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
+		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
+		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
+	}
+	ctx := context.Background()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
+		b.StartTimer()
+
+		if err := r.Retry(ctx, msgs[0], time.Now().Add(1*time.Minute), "error", true /*isFailure*/); err != nil {
+			b.Fatalf("Retry failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkArchive(b *testing.B) {
+	r := setup(b)
+	m1 := asynqtest.NewTaskMessage("task1", nil)
+	m2 := asynqtest.NewTaskMessage("task2", nil)
+	m3 := asynqtest.NewTaskMessage("task3", nil)
+	msgs := []*base.TaskMessage{m1, m2, m3}
+	zs := []base.Z{
+		{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
+		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
+		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
+	}
+	ctx := context.Background()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
+		b.StartTimer()
+
+		if err := r.Archive(ctx, msgs[0], "error"); err != nil {
+			b.Fatalf("Archive failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkRequeue(b *testing.B) {
+	r := setup(b)
+	m1 := asynqtest.NewTaskMessage("task1", nil)
+	m2 := asynqtest.NewTaskMessage("task2", nil)
+	m3 := asynqtest.NewTaskMessage("task3", nil)
+	msgs := []*base.TaskMessage{m1, m2, m3}
+	zs := []base.Z{
+		{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
+		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
+		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
+	}
+	ctx := context.Background()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
+		b.StartTimer()
+
+		if err := r.Requeue(ctx, msgs[0]); err != nil {
+			b.Fatalf("Requeue failed: %v", err)
+		}
+	}
+}
+
+func BenchmarkCheckAndEnqueue(b *testing.B) {
+	r := setup(b)
+	now := time.Now()
+	var zs []base.Z
+	for i := -100; i < 100; i++ {
+		msg := asynqtest.NewTaskMessage(fmt.Sprintf("task%d", i), nil)
+		score := now.Add(time.Duration(i) * time.Second).Unix()
+		zs = append(zs, base.Z{Message: msg, Score: score})
+	}
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		asynqtest.FlushDB(b, r.client)
+		asynqtest.SeedScheduledQueue(b, r.client, zs, base.DefaultQueueName)
+		b.StartTimer()
+
+		if err := r.ForwardIfReady(base.DefaultQueueName); err != nil {
+			b.Fatalf("ForwardIfReady failed: %v", err)
+		}
+	}
+}
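All of the new benchmarks share one pattern worth noting: per-iteration setup (flushing and reseeding redis) is excluded from the measurement by pausing the benchmark timer. A stripped-down sketch of that pattern, with the setup and the measured call left as placeholders:

package rdb

import "testing"

// Everything between StopTimer and StartTimer is setup and is not measured;
// ResetTimer discards whatever setup happened before the loop.
func BenchmarkShape(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		// reset external state here, e.g. flush and reseed redis
		b.StartTimer()

		// perform the single operation under measurement here
	}
}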
File diff suppressed because it is too large
1296	internal/rdb/rdb.go
File diff suppressed because it is too large
@@ -6,11 +6,12 @@
 package testbroker
 
 import (
+	"context"
 	"errors"
 	"sync"
 	"time"
 
-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
 	"github.com/hibiken/asynq/internal/base"
 )
@@ -26,6 +27,9 @@ type TestBroker struct {
 	real base.Broker
 }
 
+// Make sure TestBroker implements Broker interface at compile time.
+var _ base.Broker = (*TestBroker)(nil)
+
 func NewTestBroker(b base.Broker) *TestBroker {
 	return &TestBroker{real: b}
 }
@@ -42,121 +46,148 @@ func (tb *TestBroker) Wakeup() {
 	tb.sleeping = false
 }
 
-func (tb *TestBroker) Enqueue(msg *base.TaskMessage) error {
+func (tb *TestBroker) Enqueue(ctx context.Context, msg *base.TaskMessage) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Enqueue(msg)
+	return tb.real.Enqueue(ctx, msg)
 }
 
-func (tb *TestBroker) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
+func (tb *TestBroker) EnqueueUnique(ctx context.Context, msg *base.TaskMessage, ttl time.Duration) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.EnqueueUnique(msg, ttl)
+	return tb.real.EnqueueUnique(ctx, msg, ttl)
 }
 
-func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, error) {
+func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, error) {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
-		return nil, errRedisDown
+		return nil, time.Time{}, errRedisDown
 	}
 	return tb.real.Dequeue(qnames...)
 }
 
-func (tb *TestBroker) Done(msg *base.TaskMessage) error {
+func (tb *TestBroker) Done(ctx context.Context, msg *base.TaskMessage) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Done(msg)
+	return tb.real.Done(ctx, msg)
 }
 
+func (tb *TestBroker) MarkAsComplete(ctx context.Context, msg *base.TaskMessage) error {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return errRedisDown
+	}
+	return tb.real.MarkAsComplete(ctx, msg)
+}
+
-func (tb *TestBroker) Requeue(msg *base.TaskMessage) error {
+func (tb *TestBroker) Requeue(ctx context.Context, msg *base.TaskMessage) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Requeue(msg)
+	return tb.real.Requeue(ctx, msg)
 }
 
-func (tb *TestBroker) Schedule(msg *base.TaskMessage, processAt time.Time) error {
+func (tb *TestBroker) Schedule(ctx context.Context, msg *base.TaskMessage, processAt time.Time) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Schedule(msg, processAt)
+	return tb.real.Schedule(ctx, msg, processAt)
 }
 
-func (tb *TestBroker) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
+func (tb *TestBroker) ScheduleUnique(ctx context.Context, msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.ScheduleUnique(msg, processAt, ttl)
+	return tb.real.ScheduleUnique(ctx, msg, processAt, ttl)
 }
 
-func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
+func (tb *TestBroker) Retry(ctx context.Context, msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Retry(msg, processAt, errMsg)
+	return tb.real.Retry(ctx, msg, processAt, errMsg, isFailure)
}
 
-func (tb *TestBroker) Kill(msg *base.TaskMessage, errMsg string) error {
+func (tb *TestBroker) Archive(ctx context.Context, msg *base.TaskMessage, errMsg string) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Kill(msg, errMsg)
+	return tb.real.Archive(ctx, msg, errMsg)
 }
 
-func (tb *TestBroker) RequeueAll() (int64, error) {
-	tb.mu.Lock()
-	defer tb.mu.Unlock()
-	if tb.sleeping {
-		return 0, errRedisDown
-	}
-	return tb.real.RequeueAll()
-}
-
-func (tb *TestBroker) CheckAndEnqueue(qnames ...string) error {
+func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.CheckAndEnqueue()
+	return tb.real.ForwardIfReady(qnames...)
 }
 
+func (tb *TestBroker) DeleteExpiredCompletedTasks(qname string) error {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return errRedisDown
+	}
+	return tb.real.DeleteExpiredCompletedTasks(qname)
+}
+
+func (tb *TestBroker) ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*base.TaskMessage, error) {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return nil, errRedisDown
+	}
+	return tb.real.ListLeaseExpired(cutoff, qnames...)
+}
+
+func (tb *TestBroker) ExtendLease(qname string, ids ...string) (time.Time, error) {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return time.Time{}, errRedisDown
+	}
+	return tb.real.ExtendLease(qname, ids...)
+}
+
-func (tb *TestBroker) WriteServerState(ss *base.ServerState, ttl time.Duration) error {
+func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.WriteServerState(ss, ttl)
+	return tb.real.WriteServerState(info, workers, ttl)
 }
 
-func (tb *TestBroker) ClearServerState(ss *base.ServerState) error {
+func (tb *TestBroker) ClearServerState(host string, pid int, serverID string) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.ClearServerState(ss)
+	return tb.real.ClearServerState(host, pid, serverID)
 }
 
 func (tb *TestBroker) CancelationPubSub() (*redis.PubSub, error) {
@@ -177,6 +208,24 @@ func (tb *TestBroker) PublishCancelation(id string) error {
 	return tb.real.PublishCancelation(id)
 }
 
+func (tb *TestBroker) WriteResult(qname, id string, data []byte) (int, error) {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return 0, errRedisDown
+	}
+	return tb.real.WriteResult(qname, id, data)
+}
+
+func (tb *TestBroker) Ping() error {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return errRedisDown
+	}
+	return tb.real.Ping()
+}
+
 func (tb *TestBroker) Close() error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
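A sketch of how a wrapper like TestBroker is used: a test flips the broker into its "redis down" state, asserts that the component under test degrades gracefully, then wakes it up again. The Sleep method is assumed from the sleeping flag and its Wakeup counterpart, and the test body is illustrative:

package asynq

import (
	"testing"

	"github.com/hibiken/asynq/internal/rdb"
	"github.com/hibiken/asynq/internal/testbroker"
)

func TestRecoverFromRedisDowntime(t *testing.T) {
	r := rdb.NewRDB(setup(t)) // setup returns a redis client, as in this package's tests
	broker := testbroker.NewTestBroker(r)

	broker.Sleep() // from here on, every broker call returns errRedisDown
	// ... drive the component under test and assert it retries or backs off ...

	broker.Wakeup() // calls are delegated to the real broker again
	// ... assert that normal processing resumes ...
}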
59	internal/timeutil/timeutil.go	Normal file
@@ -0,0 +1,59 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

// Package timeutil exports functions and types related to time and date.
package timeutil

import (
	"sync"
	"time"
)

// A Clock is an object that can tell you the current time.
//
// This interface allows decoupling code that uses time from the code that creates
// a point in time. You can use this to your advantage by injecting Clocks into interfaces
// rather than having implementations call time.Now() directly.
//
// Use RealClock() in production.
// Use SimulatedClock() in test.
type Clock interface {
	Now() time.Time
}

func NewRealClock() Clock { return &realTimeClock{} }

type realTimeClock struct{}

func (_ *realTimeClock) Now() time.Time { return time.Now() }

// A SimulatedClock is a concrete Clock implementation that doesn't "tick" on its own.
// Time is advanced by explicit call to the AdvanceTime() or SetTime() functions.
// This object is concurrency safe.
type SimulatedClock struct {
	mu sync.Mutex
	t  time.Time // guarded by mu
}

func NewSimulatedClock(t time.Time) *SimulatedClock {
	return &SimulatedClock{t: t}
}

func (c *SimulatedClock) Now() time.Time {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.t
}

func (c *SimulatedClock) SetTime(t time.Time) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.t = t
}

func (c *SimulatedClock) AdvanceTime(d time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.t = c.t.Add(d)
}
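The doc comment above describes injecting a Clock instead of calling time.Now() directly. A minimal sketch of that pattern; the lease type is illustrative, not from this change:

package main

import (
	"fmt"
	"time"

	"github.com/hibiken/asynq/internal/timeutil"
)

// lease depends on a Clock rather than on time.Now(), so tests can control time.
type lease struct {
	clock    timeutil.Clock
	deadline time.Time
}

func (l *lease) expired() bool { return l.clock.Now().After(l.deadline) }

func main() {
	clock := timeutil.NewSimulatedClock(time.Now())
	l := &lease{clock: clock, deadline: clock.Now().Add(30 * time.Second)}

	fmt.Println(l.expired()) // false: simulated time has not moved yet
	clock.AdvanceTime(time.Minute)
	fmt.Println(l.expired()) // true: the clock was pushed past the deadline
}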
48	internal/timeutil/timeutil_test.go	Normal file
@@ -0,0 +1,48 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package timeutil

import (
	"testing"
	"time"
)

func TestSimulatedClock(t *testing.T) {
	now := time.Now()

	tests := []struct {
		desc      string
		initTime  time.Time
		advanceBy time.Duration
		wantTime  time.Time
	}{
		{
			desc:      "advance time forward",
			initTime:  now,
			advanceBy: 30 * time.Second,
			wantTime:  now.Add(30 * time.Second),
		},
		{
			desc:      "advance time backward",
			initTime:  now,
			advanceBy: -10 * time.Second,
			wantTime:  now.Add(-10 * time.Second),
		},
	}

	for _, tc := range tests {
		c := NewSimulatedClock(tc.initTime)

		if c.Now() != tc.initTime {
			t.Errorf("%s: Before Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.initTime)
		}

		c.AdvanceTime(tc.advanceBy)

		if c.Now() != tc.wantTime {
			t.Errorf("%s: After Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.wantTime)
		}
	}
}
81	janitor.go	Normal file
@@ -0,0 +1,81 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"sync"
	"time"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/log"
)

// A janitor is responsible for deleting expired completed tasks from the specified
// queues. It periodically checks for any expired tasks in the completed set, and
// deletes them.
type janitor struct {
	logger *log.Logger
	broker base.Broker

	// channel to communicate back to the long running "janitor" goroutine.
	done chan struct{}

	// list of queue names to check.
	queues []string

	// average interval between checks.
	avgInterval time.Duration
}

type janitorParams struct {
	logger   *log.Logger
	broker   base.Broker
	queues   []string
	interval time.Duration
}

func newJanitor(params janitorParams) *janitor {
	return &janitor{
		logger:      params.logger,
		broker:      params.broker,
		done:        make(chan struct{}),
		queues:      params.queues,
		avgInterval: params.interval,
	}
}

func (j *janitor) shutdown() {
	j.logger.Debug("Janitor shutting down...")
	// Signal the janitor goroutine to stop.
	j.done <- struct{}{}
}

// start starts the "janitor" goroutine.
func (j *janitor) start(wg *sync.WaitGroup) {
	wg.Add(1)
	timer := time.NewTimer(j.avgInterval) // randomize this interval with margin of 1s
	go func() {
		defer wg.Done()
		for {
			select {
			case <-j.done:
				j.logger.Debug("Janitor done")
				return
			case <-timer.C:
				j.exec()
				timer.Reset(j.avgInterval)
			}
		}
	}()
}

func (j *janitor) exec() {
	for _, qname := range j.queues {
		if err := j.broker.DeleteExpiredCompletedTasks(qname); err != nil {
			j.logger.Errorf("Failed to delete expired completed tasks from queue %q: %v",
				qname, err)
		}
	}
}
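The comment on the timer above notes an intent to randomize the interval with a margin of about one second. A minimal sketch of one way that could be done; this is not the implementation in this change:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredInterval returns avg shifted by a uniform random offset in
// [-margin, +margin), so janitors on many servers don't wake in lockstep.
func jitteredInterval(avg, margin time.Duration) time.Duration {
	return avg + time.Duration(rand.Int63n(int64(2*margin))) - margin
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(jitteredInterval(8*time.Second, time.Second))
	}
}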
89	janitor_test.go	Normal file
@@ -0,0 +1,89 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"sync"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

func newCompletedTask(qname, tasktype string, payload []byte, completedAt time.Time) *base.TaskMessage {
	msg := h.NewTaskMessageWithQueue(tasktype, payload, qname)
	msg.CompletedAt = completedAt.Unix()
	return msg
}

func TestJanitor(t *testing.T) {
	r := setup(t)
	defer r.Close()
	rdbClient := rdb.NewRDB(r)
	const interval = 1 * time.Second
	janitor := newJanitor(janitorParams{
		logger:   testLogger,
		broker:   rdbClient,
		queues:   []string{"default", "custom"},
		interval: interval,
	})

	now := time.Now()
	hourAgo := now.Add(-1 * time.Hour)
	minuteAgo := now.Add(-1 * time.Minute)
	halfHourAgo := now.Add(-30 * time.Minute)
	halfHourFromNow := now.Add(30 * time.Minute)
	fiveMinFromNow := now.Add(5 * time.Minute)
	msg1 := newCompletedTask("default", "task1", nil, hourAgo)
	msg2 := newCompletedTask("default", "task2", nil, minuteAgo)
	msg3 := newCompletedTask("custom", "task3", nil, hourAgo)
	msg4 := newCompletedTask("custom", "task4", nil, minuteAgo)

	tests := []struct {
		completed     map[string][]base.Z // initial completed sets
		wantCompleted map[string][]base.Z // expected completed sets after janitor runs
	}{
		{
			completed: map[string][]base.Z{
				"default": {
					{Message: msg1, Score: halfHourAgo.Unix()},
					{Message: msg2, Score: fiveMinFromNow.Unix()},
				},
				"custom": {
					{Message: msg3, Score: halfHourFromNow.Unix()},
					{Message: msg4, Score: minuteAgo.Unix()},
				},
			},
			wantCompleted: map[string][]base.Z{
				"default": {
					{Message: msg2, Score: fiveMinFromNow.Unix()},
				},
				"custom": {
					{Message: msg3, Score: halfHourFromNow.Unix()},
				},
			},
		},
	}

	for _, tc := range tests {
		h.FlushDB(t, r)
		h.SeedAllCompletedQueues(t, r, tc.completed)

		var wg sync.WaitGroup
		janitor.start(&wg)
		time.Sleep(2 * interval) // make sure to let janitor run at least one time
		janitor.shutdown()

		for qname, want := range tc.wantCompleted {
			got := h.GetCompletedEntries(t, r, qname)
			if diff := cmp.Diff(want, got, h.SortZSetEntryOpt); diff != "" {
				t.Errorf("diff found in %q after running janitor: (-want, +got)\n%s", base.CompletedKey(qname), diff)
			}
		}
	}
}
166	payload.go
@@ -1,166 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

// Payload holds arbitrary data needed for task execution.
type Payload struct {
	data map[string]interface{}
}

type errKeyNotFound struct {
	key string
}

func (e *errKeyNotFound) Error() string {
	return fmt.Sprintf("key %q does not exist", e.key)
}

// Has reports whether key exists.
func (p Payload) Has(key string) bool {
	_, ok := p.data[key]
	return ok
}

// GetString returns a string value if a string type is associated with
// the key, otherwise reports an error.
func (p Payload) GetString(key string) (string, error) {
	v, ok := p.data[key]
	if !ok {
		return "", &errKeyNotFound{key}
	}
	return cast.ToStringE(v)
}

// GetInt returns an int value if a numeric type is associated with
// the key, otherwise reports an error.
func (p Payload) GetInt(key string) (int, error) {
	v, ok := p.data[key]
	if !ok {
		return 0, &errKeyNotFound{key}
	}
	return cast.ToIntE(v)
}

// GetFloat64 returns a float64 value if a numeric type is associated with
// the key, otherwise reports an error.
func (p Payload) GetFloat64(key string) (float64, error) {
	v, ok := p.data[key]
	if !ok {
		return 0, &errKeyNotFound{key}
	}
	return cast.ToFloat64E(v)
}

// GetBool returns a boolean value if a boolean type is associated with
// the key, otherwise reports an error.
func (p Payload) GetBool(key string) (bool, error) {
	v, ok := p.data[key]
	if !ok {
		return false, &errKeyNotFound{key}
	}
	return cast.ToBoolE(v)
}

// GetStringSlice returns a slice of strings if a string slice type is associated with
// the key, otherwise reports an error.
func (p Payload) GetStringSlice(key string) ([]string, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringSliceE(v)
}

// GetIntSlice returns a slice of ints if an int slice type is associated with
// the key, otherwise reports an error.
func (p Payload) GetIntSlice(key string) ([]int, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToIntSliceE(v)
}

// GetStringMap returns a map of string to empty interface
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMap(key string) (map[string]interface{}, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapE(v)
}

// GetStringMapString returns a map of string to string
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapString(key string) (map[string]string, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapStringE(v)
}

// GetStringMapStringSlice returns a map of string to string slice
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapStringSlice(key string) (map[string][]string, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapStringSliceE(v)
}

// GetStringMapInt returns a map of string to int
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapInt(key string) (map[string]int, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapIntE(v)
}

// GetStringMapBool returns a map of string to boolean
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapBool(key string) (map[string]bool, error) {
	v, ok := p.data[key]
	if !ok {
		return nil, &errKeyNotFound{key}
	}
	return cast.ToStringMapBoolE(v)
}

// GetTime returns a time value if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetTime(key string) (time.Time, error) {
	v, ok := p.data[key]
	if !ok {
		return time.Time{}, &errKeyNotFound{key}
	}
	return cast.ToTimeE(v)
}

// GetDuration returns a duration value if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetDuration(key string) (time.Duration, error) {
	v, ok := p.data[key]
	if !ok {
		return 0, &errKeyNotFound{key}
	}
	return cast.ToDurationE(v)
}
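With Payload removed, a task's payload is plain bytes (see the bytes payload field in the schema above), and callers serialize their own types. A minimal sketch of the replacement pattern using encoding/json; the task type name and payload struct are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/hibiken/asynq"
)

// EmailTaskPayload is an illustrative caller-owned payload type.
type EmailTaskPayload struct {
	UserID int `json:"user_id"`
}

func main() {
	payload, err := json.Marshal(EmailTaskPayload{UserID: 42})
	if err != nil {
		log.Fatal(err)
	}
	task := asynq.NewTask("email:welcome", payload)

	// Inside a handler, decode the bytes back into the typed payload.
	var p EmailTaskPayload
	if err := json.Unmarshal(task.Payload(), &p); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.UserID)
}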
651	payload_test.go
@@ -1,651 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
)

type payloadTest struct {
	data   map[string]interface{}
	key    string
	nonkey string
}

func TestPayloadString(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"name": "gopher"},
			key:    "name",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetString(tc.key)
		if err != nil || got != tc.data[tc.key] {
			t.Errorf("Payload.GetString(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetString(tc.key)
		if err != nil || got != tc.data[tc.key] {
			t.Errorf("With Marshaling: Payload.GetString(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetString(tc.nonkey)
		if err == nil || got != "" {
			t.Errorf("Payload.GetString(%q) = %v, %v; want '', error",
				tc.key, got, err)
		}
	}
}

func TestPayloadInt(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"user_id": 42},
			key:    "user_id",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetInt(tc.key)
		if err != nil || got != tc.data[tc.key] {
			t.Errorf("Payload.GetInt(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetInt(tc.key)
		if err != nil || got != tc.data[tc.key] {
			t.Errorf("With Marshaling: Payload.GetInt(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetInt(tc.nonkey)
		if err == nil || got != 0 {
			t.Errorf("Payload.GetInt(%q) = %v, %v; want 0, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadFloat64(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"pi": 3.14},
			key:    "pi",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetFloat64(tc.key)
		if err != nil || got != tc.data[tc.key] {
			t.Errorf("Payload.GetFloat64(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetFloat64(tc.key)
		if err != nil || got != tc.data[tc.key] {
			t.Errorf("With Marshaling: Payload.GetFloat64(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetFloat64(tc.nonkey)
		if err == nil || got != 0 {
			t.Errorf("Payload.GetFloat64(%q) = %v, %v; want 0, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadBool(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"enabled": true},
			key:    "enabled",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetBool(tc.key)
		if err != nil || got != tc.data[tc.key] {
			t.Errorf("Payload.GetBool(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetBool(tc.key)
		if err != nil || got != tc.data[tc.key] {
			t.Errorf("With Marshaling: Payload.GetBool(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetBool(tc.nonkey)
		if err == nil || got != false {
			t.Errorf("Payload.GetBool(%q) = %v, %v; want false, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadStringSlice(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"names": []string{"luke", "rey", "anakin"}},
			key:    "names",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetStringSlice(tc.key)
		diff := cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("Payload.GetStringSlice(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetStringSlice(tc.key)
		diff = cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("With Marshaling: Payload.GetStringSlice(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetStringSlice(tc.nonkey)
		if err == nil || got != nil {
			t.Errorf("Payload.GetStringSlice(%q) = %v, %v; want nil, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadIntSlice(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"nums": []int{9, 8, 7}},
			key:    "nums",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetIntSlice(tc.key)
		diff := cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("Payload.GetIntSlice(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetIntSlice(tc.key)
		diff = cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("With Marshaling: Payload.GetIntSlice(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetIntSlice(tc.nonkey)
		if err == nil || got != nil {
			t.Errorf("Payload.GetIntSlice(%q) = %v, %v; want nil, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadStringMap(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"user": map[string]interface{}{"name": "Jon Doe", "score": 2.2}},
			key:    "user",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetStringMap(tc.key)
		diff := cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("Payload.GetStringMap(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetStringMap(tc.key)
		diff = cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("With Marshaling: Payload.GetStringMap(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetStringMap(tc.nonkey)
		if err == nil || got != nil {
			t.Errorf("Payload.GetStringMap(%q) = %v, %v; want nil, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadStringMapString(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"address": map[string]string{"line": "123 Main St", "city": "San Francisco", "state": "CA"}},
			key:    "address",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetStringMapString(tc.key)
		diff := cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("Payload.GetStringMapString(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetStringMapString(tc.key)
		diff = cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("With Marshaling: Payload.GetStringMapString(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetStringMapString(tc.nonkey)
		if err == nil || got != nil {
			t.Errorf("Payload.GetStringMapString(%q) = %v, %v; want nil, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadStringMapStringSlice(t *testing.T) {
	favs := map[string][]string{
		"movies":   {"forrest gump", "star wars"},
		"tv_shows": {"game of thrones", "HIMYM", "breaking bad"},
	}
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"favorites": favs},
			key:    "favorites",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetStringMapStringSlice(tc.key)
		diff := cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("Payload.GetStringMapStringSlice(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetStringMapStringSlice(tc.key)
		diff = cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("With Marshaling: Payload.GetStringMapStringSlice(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetStringMapStringSlice(tc.nonkey)
		if err == nil || got != nil {
			t.Errorf("Payload.GetStringMapStringSlice(%q) = %v, %v; want nil, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadStringMapInt(t *testing.T) {
	counter := map[string]int{
		"a": 1,
		"b": 101,
		"c": 42,
	}
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"counts": counter},
			key:    "counts",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetStringMapInt(tc.key)
		diff := cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("Payload.GetStringMapInt(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetStringMapInt(tc.key)
		diff = cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("With Marshaling: Payload.GetStringMapInt(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetStringMapInt(tc.nonkey)
		if err == nil || got != nil {
			t.Errorf("Payload.GetStringMapInt(%q) = %v, %v; want nil, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadStringMapBool(t *testing.T) {
	features := map[string]bool{
		"A": false,
		"B": true,
		"C": true,
	}
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"features": features},
			key:    "features",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetStringMapBool(tc.key)
		diff := cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("Payload.GetStringMapBool(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetStringMapBool(tc.key)
		diff = cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("With Marshaling: Payload.GetStringMapBool(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetStringMapBool(tc.nonkey)
		if err == nil || got != nil {
			t.Errorf("Payload.GetStringMapBool(%q) = %v, %v; want nil, error",
				tc.key, got, err)
		}
	}
}

func TestPayloadTime(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"current": time.Now()},
			key:    "current",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetTime(tc.key)
		diff := cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("Payload.GetTime(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// encode and then decode task message.
		in := h.NewTaskMessage("testing", tc.data)
		b, err := json.Marshal(in)
		if err != nil {
			t.Fatal(err)
		}
		var out base.TaskMessage
		err = json.Unmarshal(b, &out)
		if err != nil {
			t.Fatal(err)
		}
		payload = Payload{out.Payload}
		got, err = payload.GetTime(tc.key)
		diff = cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("With Marshaling: Payload.GetTime(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
		}

		// access non-existent key.
		got, err = payload.GetTime(tc.nonkey)
		if err == nil || !got.IsZero() {
			t.Errorf("Payload.GetTime(%q) = %v, %v; want %v, error",
				tc.key, got, err, time.Time{})
		}
	}
}

func TestPayloadDuration(t *testing.T) {
	tests := []payloadTest{
		{
			data:   map[string]interface{}{"duration": 15 * time.Minute},
			key:    "duration",
			nonkey: "unknown",
		},
	}

	for _, tc := range tests {
		payload := Payload{tc.data}

		got, err := payload.GetDuration(tc.key)
		diff := cmp.Diff(got, tc.data[tc.key])
		if err != nil || diff != "" {
			t.Errorf("Payload.GetDuration(%q) = %v, %v, want %v, nil",
				tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode and then decode task messsage.
|
|
||||||
in := h.NewTaskMessage("testing", tc.data)
|
|
||||||
b, err := json.Marshal(in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var out base.TaskMessage
|
|
||||||
err = json.Unmarshal(b, &out)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
payload = Payload{out.Payload}
|
|
||||||
got, err = payload.GetDuration(tc.key)
|
|
||||||
diff = cmp.Diff(got, tc.data[tc.key])
|
|
||||||
if err != nil || diff != "" {
|
|
||||||
t.Errorf("With Marshaling: Payload.GetDuration(%q) = %v, %v, want %v, nil",
|
|
||||||
tc.key, got, err, tc.data[tc.key])
|
|
||||||
}
|
|
||||||
|
|
||||||
// access non-existent key.
|
|
||||||
got, err = payload.GetDuration(tc.nonkey)
|
|
||||||
if err == nil || got != 0 {
|
|
||||||
t.Errorf("Payload.GetDuration(%q) = %v, %v; want %v, error",
|
|
||||||
tc.key, got, err, time.Duration(0))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPayloadHas(t *testing.T) {
|
|
||||||
payload := Payload{map[string]interface{}{
|
|
||||||
"user_id": 123,
|
|
||||||
}}
|
|
||||||
|
|
||||||
if !payload.Has("user_id") {
|
|
||||||
t.Errorf("Payload.Has(%q) = false, want true", "user_id")
|
|
||||||
}
|
|
||||||
if payload.Has("name") {
|
|
||||||
t.Errorf("Payload.Has(%q) = true, want false", "name")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
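These table-driven payload tests all share a payloadTest case struct that is defined earlier in the file and not shown in this excerpt. A minimal sketch of the shape the cases above appear to assume (a hypothetical reconstruction, not the actual definition):

// Hypothetical reconstruction of the shared test-case struct; the real
// definition lives earlier in the test file and may differ in detail.
type payloadTest struct {
	data   map[string]interface{} // payload data to seed the task with
	key    string                 // a key expected to be present in data
	nonkey string                 // a key expected to be absent from data
}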
periodic_task_manager.go (new file, 243 lines)
@@ -0,0 +1,243 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"crypto/sha256"
	"fmt"
	"io"
	"sort"
	"sync"
	"time"
)

// PeriodicTaskManager manages scheduling of periodic tasks.
// It syncs the scheduler's entries by calling the config provider periodically.
type PeriodicTaskManager struct {
	s            *Scheduler
	p            PeriodicTaskConfigProvider
	syncInterval time.Duration
	done         chan (struct{})
	wg           sync.WaitGroup
	m            map[string]string // map[hash]entryID
}

type PeriodicTaskManagerOpts struct {
	// Required: must be non-nil
	PeriodicTaskConfigProvider PeriodicTaskConfigProvider

	// Required: must be non-nil
	RedisConnOpt RedisConnOpt

	// Optional: scheduler options
	*SchedulerOpts

	// Optional: default is 3m
	SyncInterval time.Duration
}

const defaultSyncInterval = 3 * time.Minute

// NewPeriodicTaskManager returns a new PeriodicTaskManager instance.
// The given opts should specify the RedisConnOpt and PeriodicTaskConfigProvider at minimum.
func NewPeriodicTaskManager(opts PeriodicTaskManagerOpts) (*PeriodicTaskManager, error) {
	if opts.PeriodicTaskConfigProvider == nil {
		return nil, fmt.Errorf("PeriodicTaskConfigProvider cannot be nil")
	}
	if opts.RedisConnOpt == nil {
		return nil, fmt.Errorf("RedisConnOpt cannot be nil")
	}
	scheduler := NewScheduler(opts.RedisConnOpt, opts.SchedulerOpts)
	syncInterval := opts.SyncInterval
	if syncInterval == 0 {
		syncInterval = defaultSyncInterval
	}
	return &PeriodicTaskManager{
		s:            scheduler,
		p:            opts.PeriodicTaskConfigProvider,
		syncInterval: syncInterval,
		done:         make(chan struct{}),
		m:            make(map[string]string),
	}, nil
}

// PeriodicTaskConfigProvider provides configs for periodic tasks.
// GetConfigs will be called by a PeriodicTaskManager periodically to
// sync the scheduler's entries with the configs returned by the provider.
type PeriodicTaskConfigProvider interface {
	GetConfigs() ([]*PeriodicTaskConfig, error)
}

// PeriodicTaskConfig specifies the details of a periodic task.
type PeriodicTaskConfig struct {
	Cronspec string   // required: must be a non-empty string
	Task     *Task    // required: must be non-nil
	Opts     []Option // optional: can be nil
}

func (c *PeriodicTaskConfig) hash() string {
	h := sha256.New()
	io.WriteString(h, c.Cronspec)
	io.WriteString(h, c.Task.Type())
	h.Write(c.Task.Payload())
	opts := stringifyOptions(c.Opts)
	sort.Strings(opts)
	for _, opt := range opts {
		io.WriteString(h, opt)
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func validatePeriodicTaskConfig(c *PeriodicTaskConfig) error {
	if c == nil {
		return fmt.Errorf("PeriodicTaskConfig cannot be nil")
	}
	if c.Task == nil {
		return fmt.Errorf("PeriodicTaskConfig.Task cannot be nil")
	}
	if c.Cronspec == "" {
		return fmt.Errorf("PeriodicTaskConfig.Cronspec cannot be empty")
	}
	return nil
}

// Start starts a scheduler and a background goroutine to sync the scheduler
// with the configs returned by the provider.
//
// Start returns any error encountered at start up time.
func (mgr *PeriodicTaskManager) Start() error {
	if mgr.s == nil || mgr.p == nil {
		panic("asynq: cannot start uninitialized PeriodicTaskManager; use NewPeriodicTaskManager to initialize")
	}
	if err := mgr.initialSync(); err != nil {
		return fmt.Errorf("asynq: %v", err)
	}
	if err := mgr.s.Start(); err != nil {
		return fmt.Errorf("asynq: %v", err)
	}
	mgr.wg.Add(1)
	go func() {
		defer mgr.wg.Done()
		ticker := time.NewTicker(mgr.syncInterval)
		for {
			select {
			case <-mgr.done:
				mgr.s.logger.Debugf("Stopping syncer goroutine")
				ticker.Stop()
				return
			case <-ticker.C:
				mgr.sync()
			}
		}
	}()
	return nil
}

// Shutdown gracefully shuts down the manager.
// It notifies the background syncer goroutine to stop and stops the scheduler.
func (mgr *PeriodicTaskManager) Shutdown() {
	close(mgr.done)
	mgr.wg.Wait()
	mgr.s.Shutdown()
}

// Run starts the manager and blocks until an os signal to exit the program is received.
// Once it receives a signal, it gracefully shuts down the manager.
func (mgr *PeriodicTaskManager) Run() error {
	if err := mgr.Start(); err != nil {
		return err
	}
	mgr.s.waitForSignals()
	mgr.Shutdown()
	mgr.s.logger.Debugf("PeriodicTaskManager exiting")
	return nil
}

func (mgr *PeriodicTaskManager) initialSync() error {
	configs, err := mgr.p.GetConfigs()
	if err != nil {
		return fmt.Errorf("initial call to GetConfigs failed: %v", err)
	}
	for _, c := range configs {
		if err := validatePeriodicTaskConfig(c); err != nil {
			return fmt.Errorf("initial call to GetConfigs contained an invalid config: %v", err)
		}
	}
	mgr.add(configs)
	return nil
}

func (mgr *PeriodicTaskManager) add(configs []*PeriodicTaskConfig) {
	for _, c := range configs {
		entryID, err := mgr.s.Register(c.Cronspec, c.Task, c.Opts...)
		if err != nil {
			mgr.s.logger.Errorf("Failed to register periodic task: cronspec=%q task=%q",
				c.Cronspec, c.Task.Type())
			continue
		}
		mgr.m[c.hash()] = entryID
		mgr.s.logger.Infof("Successfully registered periodic task: cronspec=%q task=%q, entryID=%s",
			c.Cronspec, c.Task.Type(), entryID)
	}
}

func (mgr *PeriodicTaskManager) remove(removed map[string]string) {
	for hash, entryID := range removed {
		if err := mgr.s.Unregister(entryID); err != nil {
			mgr.s.logger.Errorf("Failed to unregister periodic task: %v", err)
			continue
		}
		delete(mgr.m, hash)
		mgr.s.logger.Infof("Successfully unregistered periodic task: entryID=%s", entryID)
	}
}

func (mgr *PeriodicTaskManager) sync() {
	configs, err := mgr.p.GetConfigs()
	if err != nil {
		mgr.s.logger.Errorf("Failed to get periodic task configs: %v", err)
		return
	}
	for _, c := range configs {
		if err := validatePeriodicTaskConfig(c); err != nil {
			mgr.s.logger.Errorf("Failed to sync: GetConfigs returned an invalid config: %v", err)
			return
		}
	}
	// Diff and only register/unregister the newly added/removed entries.
	removed := mgr.diffRemoved(configs)
	added := mgr.diffAdded(configs)
	mgr.remove(removed)
	mgr.add(added)
}

// diffRemoved diffs the incoming configs with the registered configs and returns
// a map containing the hash and entryID of each config that was removed.
func (mgr *PeriodicTaskManager) diffRemoved(configs []*PeriodicTaskConfig) map[string]string {
	newConfigs := make(map[string]string)
	for _, c := range configs {
		newConfigs[c.hash()] = "" // empty value since we don't have entryID yet
	}
	removed := make(map[string]string)
	for k, v := range mgr.m {
		// test whether existing config is present in the incoming configs
		if _, found := newConfigs[k]; !found {
			removed[k] = v
		}
	}
	return removed
}

// diffAdded diffs the incoming configs with the registered configs and returns
// a list of configs that were added.
func (mgr *PeriodicTaskManager) diffAdded(configs []*PeriodicTaskConfig) []*PeriodicTaskConfig {
	var added []*PeriodicTaskConfig
	for _, c := range configs {
		if _, found := mgr.m[c.hash()]; !found {
			added = append(added, c)
		}
	}
	return added
}
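For orientation, here is one way this new API could be wired up end to end. The slice-backed provider, task name, and Redis address below are illustrative assumptions, not part of the diff; a real provider would typically read configs from a database or config service so that the manager picks up changes on each sync.

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// staticProvider serves a fixed set of configs; swap in a provider that
// reads from your own storage to get dynamic periodic tasks.
type staticProvider struct{ cfgs []*asynq.PeriodicTaskConfig }

func (p *staticProvider) GetConfigs() ([]*asynq.PeriodicTaskConfig, error) {
	return p.cfgs, nil
}

func main() {
	provider := &staticProvider{cfgs: []*asynq.PeriodicTaskConfig{
		{Cronspec: "0 * * * *", Task: asynq.NewTask("cleanup", nil)},
	}}
	mgr, err := asynq.NewPeriodicTaskManager(asynq.PeriodicTaskManagerOpts{
		RedisConnOpt:               asynq.RedisClientOpt{Addr: "localhost:6379"},
		PeriodicTaskConfigProvider: provider,
		SyncInterval:               time.Minute, // re-read configs every minute
	})
	if err != nil {
		log.Fatal(err)
	}
	// Run blocks until an exit signal is received, then shuts down gracefully.
	if err := mgr.Run(); err != nil {
		log.Fatal(err)
	}
}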
periodic_task_manager_test.go (new file, 343 lines)
@@ -0,0 +1,343 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
)

// Trivial implementation of PeriodicTaskConfigProvider for testing purposes.
type FakeConfigProvider struct {
	mu   sync.Mutex
	cfgs []*PeriodicTaskConfig
}

func (p *FakeConfigProvider) SetConfigs(cfgs []*PeriodicTaskConfig) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.cfgs = cfgs
}

func (p *FakeConfigProvider) GetConfigs() ([]*PeriodicTaskConfig, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.cfgs, nil
}

func TestNewPeriodicTaskManager(t *testing.T) {
	cfgs := []*PeriodicTaskConfig{
		{Cronspec: "* * * * *", Task: NewTask("foo", nil)},
		{Cronspec: "* * * * *", Task: NewTask("bar", nil)},
	}
	tests := []struct {
		desc string
		opts PeriodicTaskManagerOpts
	}{
		{
			desc: "with provider and redisConnOpt",
			opts: PeriodicTaskManagerOpts{
				RedisConnOpt:               RedisClientOpt{Addr: ":6379"},
				PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
			},
		},
		{
			desc: "with sync option",
			opts: PeriodicTaskManagerOpts{
				RedisConnOpt:               RedisClientOpt{Addr: ":6379"},
				PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
				SyncInterval:               5 * time.Minute,
			},
		},
		{
			desc: "with scheduler option",
			opts: PeriodicTaskManagerOpts{
				RedisConnOpt:               RedisClientOpt{Addr: ":6379"},
				PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
				SyncInterval:               5 * time.Minute,
				SchedulerOpts: &SchedulerOpts{
					LogLevel: DebugLevel,
				},
			},
		},
	}

	for _, tc := range tests {
		_, err := NewPeriodicTaskManager(tc.opts)
		if err != nil {
			t.Errorf("%s; NewPeriodicTaskManager returned error: %v", tc.desc, err)
		}
	}
}

func TestNewPeriodicTaskManagerError(t *testing.T) {
	cfgs := []*PeriodicTaskConfig{
		{Cronspec: "* * * * *", Task: NewTask("foo", nil)},
		{Cronspec: "* * * * *", Task: NewTask("bar", nil)},
	}
	tests := []struct {
		desc string
		opts PeriodicTaskManagerOpts
	}{
		{
			desc: "without provider",
			opts: PeriodicTaskManagerOpts{
				RedisConnOpt: RedisClientOpt{Addr: ":6379"},
			},
		},
		{
			desc: "without redisConnOpt",
			opts: PeriodicTaskManagerOpts{
				PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
			},
		},
	}

	for _, tc := range tests {
		_, err := NewPeriodicTaskManager(tc.opts)
		if err == nil {
			t.Errorf("%s; NewPeriodicTaskManager did not return error", tc.desc)
		}
	}
}

func TestPeriodicTaskConfigHash(t *testing.T) {
	tests := []struct {
		desc   string
		a      *PeriodicTaskConfig
		b      *PeriodicTaskConfig
		isSame bool
	}{
		{
			desc: "basic identity test",
			a: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
			},
			b: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
			},
			isSame: true,
		},
		{
			desc: "with an option",
			a: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
				Opts:     []Option{Queue("myqueue")},
			},
			b: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
				Opts:     []Option{Queue("myqueue")},
			},
			isSame: true,
		},
		{
			desc: "with multiple options (different order)",
			a: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
				Opts:     []Option{Unique(5 * time.Minute), Queue("myqueue")},
			},
			b: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
				Opts:     []Option{Queue("myqueue"), Unique(5 * time.Minute)},
			},
			isSame: true,
		},
		{
			desc: "with payload",
			a: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", []byte("hello world!")),
				Opts:     []Option{Queue("myqueue")},
			},
			b: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", []byte("hello world!")),
				Opts:     []Option{Queue("myqueue")},
			},
			isSame: true,
		},
		{
			desc: "with different cronspecs",
			a: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
			},
			b: &PeriodicTaskConfig{
				Cronspec: "5 * * * *",
				Task:     NewTask("foo", nil),
			},
			isSame: false,
		},
		{
			desc: "with different task type",
			a: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
			},
			b: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("bar", nil),
			},
			isSame: false,
		},
		{
			desc: "with different options",
			a: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
				Opts:     []Option{Queue("myqueue")},
			},
			b: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
				Opts:     []Option{Unique(10 * time.Minute)},
			},
			isSame: false,
		},
		{
			desc: "with different options (one is subset of the other)",
			a: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
				Opts:     []Option{Queue("myqueue")},
			},
			b: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", nil),
				Opts:     []Option{Queue("myqueue"), Unique(10 * time.Minute)},
			},
			isSame: false,
		},
		{
			desc: "with different payload",
			a: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", []byte("hello!")),
				Opts:     []Option{Queue("myqueue")},
			},
			b: &PeriodicTaskConfig{
				Cronspec: "* * * * *",
				Task:     NewTask("foo", []byte("HELLO!")),
				Opts:     []Option{Queue("myqueue"), Unique(10 * time.Minute)},
			},
			isSame: false,
		},
	}

	for _, tc := range tests {
		if tc.isSame && tc.a.hash() != tc.b.hash() {
			t.Errorf("%s: a.hash=%s b.hash=%s expected to be equal",
				tc.desc, tc.a.hash(), tc.b.hash())
		}
		if !tc.isSame && tc.a.hash() == tc.b.hash() {
			t.Errorf("%s: a.hash=%s b.hash=%s expected not to be equal",
				tc.desc, tc.a.hash(), tc.b.hash())
		}
	}
}

// Things to test.
// - Run the manager
// - Change provider to return new configs
// - Verify that the scheduler synced with the new config
func TestPeriodicTaskManager(t *testing.T) {
	// Note: In this test, we'll use task type as an ID for each config.
	cfgs := []*PeriodicTaskConfig{
		{Task: NewTask("task1", nil), Cronspec: "* * * * 1"},
		{Task: NewTask("task2", nil), Cronspec: "* * * * 2"},
	}
	const syncInterval = 3 * time.Second
	provider := &FakeConfigProvider{cfgs: cfgs}
	mgr, err := NewPeriodicTaskManager(PeriodicTaskManagerOpts{
		RedisConnOpt:               getRedisConnOpt(t),
		PeriodicTaskConfigProvider: provider,
		SyncInterval:               syncInterval,
	})
	if err != nil {
		t.Fatalf("Failed to initialize PeriodicTaskManager: %v", err)
	}

	if err := mgr.Start(); err != nil {
		t.Fatalf("Failed to start PeriodicTaskManager: %v", err)
	}
	defer mgr.Shutdown()

	got := extractCronEntries(mgr.s)
	want := []*cronEntry{
		{Cronspec: "* * * * 1", TaskType: "task1"},
		{Cronspec: "* * * * 2", TaskType: "task2"},
	}
	if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
		t.Errorf("Diff found in scheduler's registered entries: %s", diff)
	}

	// Change the underlying configs
	// - task2 removed
	// - task3 added
	provider.SetConfigs([]*PeriodicTaskConfig{
		{Task: NewTask("task1", nil), Cronspec: "* * * * 1"},
		{Task: NewTask("task3", nil), Cronspec: "* * * * 3"},
	})

	// Wait for the next sync
	time.Sleep(syncInterval * 2)

	// Verify the entries are synced
	got = extractCronEntries(mgr.s)
	want = []*cronEntry{
		{Cronspec: "* * * * 1", TaskType: "task1"},
		{Cronspec: "* * * * 3", TaskType: "task3"},
	}
	if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
		t.Errorf("Diff found in scheduler's registered entries: %s", diff)
	}

	// Change the underlying configs
	// All configs removed, empty set.
	provider.SetConfigs([]*PeriodicTaskConfig{})

	// Wait for the next sync
	time.Sleep(syncInterval * 2)

	// Verify the entries are synced
	got = extractCronEntries(mgr.s)
	want = []*cronEntry{}
	if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
		t.Errorf("Diff found in scheduler's registered entries: %s", diff)
	}
}

func extractCronEntries(s *Scheduler) []*cronEntry {
	var out []*cronEntry
	for _, e := range s.cron.Entries() {
		job := e.Job.(*enqueueJob)
		out = append(out, &cronEntry{Cronspec: job.cronspec, TaskType: job.task.Type()})
	}
	return out
}

var sortCronEntry = cmp.Transformer("sortCronEntry", func(in []*cronEntry) []*cronEntry {
	out := append([]*cronEntry(nil), in...)
	sort.Slice(out, func(i, j int) bool {
		return out[i].TaskType < out[j].TaskType
	})
	return out
})

// A simple struct to allow for simpler comparison in tests.
type cronEntry struct {
	Cronspec string
	TaskType string
}
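The sortCronEntry transformer above makes cmp.Diff order-insensitive, since the scheduler does not guarantee entry ordering. For reference, the same effect can be had with cmpopts.SortSlices; the standalone sketch below is illustrative and not part of the diff:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type cronEntry struct {
	Cronspec string
	TaskType string
}

func main() {
	got := []*cronEntry{{"* * * * 2", "task2"}, {"* * * * 1", "task1"}}
	want := []*cronEntry{{"* * * * 1", "task1"}, {"* * * * 2", "task2"}}
	// Sort both slices by TaskType before comparing.
	sortOpt := cmpopts.SortSlices(func(a, b *cronEntry) bool {
		return a.TaskType < b.TaskType
	})
	fmt.Println(cmp.Diff(want, got, sortOpt) == "") // true: equal up to ordering
}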
processor.go (328 changed lines)
@@ -7,31 +7,38 @@ package asynq
 import (
 	"context"
 	"fmt"
+	"math"
 	"math/rand"
+	"runtime"
+	"runtime/debug"
 	"sort"
+	"strings"
 	"sync"
 	"time"
 
 	"github.com/hibiken/asynq/internal/base"
+	asynqcontext "github.com/hibiken/asynq/internal/context"
+	"github.com/hibiken/asynq/internal/errors"
 	"github.com/hibiken/asynq/internal/log"
-	"github.com/hibiken/asynq/internal/rdb"
+	"github.com/hibiken/asynq/internal/timeutil"
 	"golang.org/x/time/rate"
 )
 
 type processor struct {
 	logger *log.Logger
 	broker base.Broker
-	ss     *base.ServerState
+	clock  timeutil.Clock
 
 	handler   Handler
+	baseCtxFn func() context.Context
 
 	queueConfig map[string]int
 
 	// orderedQueues is set only in strict-priority mode.
 	orderedQueues []string
 
-	retryDelayFunc retryDelayFunc
+	retryDelayFunc RetryDelayFunc
+	isFailureFunc  func(error) bool
 
 	errHandler ErrorHandler
 
@@ -52,53 +59,64 @@ type processor struct {
 	done chan struct{}
 	once sync.Once
 
-	// abort channel is closed when the shutdown of the "processor" goroutine starts.
-	abort chan struct{}
-
-	// quit channel communicates to the in-flight worker goroutines to stop.
+	// quit channel is closed when the shutdown of the "processor" goroutine starts.
 	quit chan struct{}
 
-	// cancelations is a set of cancel functions for all in-progress tasks.
+	// abort channel communicates to the in-flight worker goroutines to stop.
+	abort chan struct{}
+
+	// cancelations is a set of cancel functions for all active tasks.
 	cancelations *base.Cancelations
+
+	starting chan<- *workerInfo
+	finished chan<- *base.TaskMessage
 }
 
-type retryDelayFunc func(n int, err error, task *Task) time.Duration
-
 type processorParams struct {
 	logger          *log.Logger
 	broker          base.Broker
-	ss              *base.ServerState
-	retryDelayFunc  retryDelayFunc
+	baseCtxFn       func() context.Context
+	retryDelayFunc  RetryDelayFunc
+	isFailureFunc   func(error) bool
 	syncCh          chan<- *syncRequest
 	cancelations    *base.Cancelations
+	concurrency     int
+	queues          map[string]int
+	strictPriority  bool
 	errHandler      ErrorHandler
 	shutdownTimeout time.Duration
+	starting        chan<- *workerInfo
+	finished        chan<- *base.TaskMessage
 }
 
 // newProcessor constructs a new processor.
 func newProcessor(params processorParams) *processor {
-	info := params.ss.GetInfo()
-	qcfg := normalizeQueueCfg(info.Queues)
+	queues := normalizeQueues(params.queues)
 	orderedQueues := []string(nil)
-	if info.StrictPriority {
-		orderedQueues = sortByPriority(qcfg)
+	if params.strictPriority {
+		orderedQueues = sortByPriority(queues)
 	}
 	return &processor{
 		logger:         params.logger,
 		broker:         params.broker,
-		ss:             params.ss,
-		queueConfig:    qcfg,
+		baseCtxFn:      params.baseCtxFn,
+		clock:          timeutil.NewRealClock(),
+		queueConfig:    queues,
 		orderedQueues:  orderedQueues,
 		retryDelayFunc: params.retryDelayFunc,
+		isFailureFunc:  params.isFailureFunc,
 		syncRequestCh:  params.syncCh,
 		cancelations:   params.cancelations,
 		errLogLimiter:  rate.NewLimiter(rate.Every(3*time.Second), 1),
-		sema:           make(chan struct{}, info.Concurrency),
+		sema:           make(chan struct{}, params.concurrency),
 		done:           make(chan struct{}),
-		abort:          make(chan struct{}),
 		quit:           make(chan struct{}),
+		abort:          make(chan struct{}),
 		errHandler:     params.errHandler,
 		handler:        HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
+		shutdownTimeout: params.shutdownTimeout,
+		starting:        params.starting,
+		finished:        params.finished,
 	}
 }
 
@@ -108,37 +126,28 @@ func (p *processor) stop() {
 	p.once.Do(func() {
 		p.logger.Debug("Processor shutting down...")
 		// Unblock if processor is waiting for sema token.
-		close(p.abort)
+		close(p.quit)
 		// Signal the processor goroutine to stop processing tasks
 		// from the queue.
 		p.done <- struct{}{}
 	})
 }
 
-// NOTE: once terminated, processor cannot be re-started.
-func (p *processor) terminate() {
+// NOTE: once shutdown, processor cannot be re-started.
+func (p *processor) shutdown() {
 	p.stop()
 
-	time.AfterFunc(p.shutdownTimeout, func() { close(p.quit) })
+	time.AfterFunc(p.shutdownTimeout, func() { close(p.abort) })
 
 	p.logger.Info("Waiting for all workers to finish...")
 
-	// send cancellation signal to all in-progress task handlers
-	for _, cancel := range p.cancelations.GetAll() {
-		cancel()
-	}
-
 	// block until all workers have released the token
 	for i := 0; i < cap(p.sema); i++ {
 		p.sema <- struct{}{}
 	}
 	p.logger.Info("All workers have finished")
-	p.restore() // move any unfinished tasks back to the queue.
 }
 
 func (p *processor) start(wg *sync.WaitGroup) {
-	// NOTE: The call to "restore" needs to complete before starting
-	// the processor goroutine.
-	p.restore()
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
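After this hunk the channels implement a two-phase shutdown: stop() closes quit so the processor stops dequeuing immediately, while shutdown() closes abort only once the grace period elapses, interrupting workers that are still running. A minimal standalone sketch of that pattern (illustrative, not the library's code):

package main

import (
	"fmt"
	"time"
)

func main() {
	quit := make(chan struct{})  // closed first: stop pulling new tasks
	abort := make(chan struct{}) // closed after the grace period: stop in-flight work

	go func() {
		// Intake loop: stops as soon as quit is closed.
		<-quit
		fmt.Println("processor: no longer dequeuing")
	}()
	go func() {
		// In-flight worker: only interrupted by abort.
		select {
		case <-abort:
			fmt.Println("worker: interrupted after grace period")
		case <-time.After(5 * time.Second):
			fmt.Println("worker: completed")
		}
	}()

	close(quit)                                            // phase 1: stop intake
	time.AfterFunc(2*time.Second, func() { close(abort) }) // phase 2: hard stop
	time.Sleep(3 * time.Second)                            // let the goroutines report
}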
@@ -157,137 +166,216 @@ func (p *processor) start(wg *sync.WaitGroup) {
 // exec pulls a task out of the queue and starts a worker goroutine to
 // process the task.
 func (p *processor) exec() {
+	select {
+	case <-p.quit:
+		return
+	case p.sema <- struct{}{}: // acquire token
 		qnames := p.queues()
-		msg, err := p.broker.Dequeue(qnames...)
+		msg, leaseExpirationTime, err := p.broker.Dequeue(qnames...)
 		switch {
-		case err == rdb.ErrNoProcessableTask:
-			// queues are empty, this is a normal behavior.
-			if len(qnames) > 1 {
-				// sleep to avoid slamming redis and let scheduler move tasks into queues.
-				// Note: With multiple queues, we are not using blocking pop operation and
-				// polling queues instead. This adds significant load to redis.
-				time.Sleep(time.Second)
-			}
+		case errors.Is(err, errors.ErrNoProcessableTask):
 			p.logger.Debug("All queues are empty")
+			// Queues are empty, this is a normal behavior.
+			// Sleep to avoid slamming redis and let scheduler move tasks into queues.
+			// Note: We are not using blocking pop operation and polling queues instead.
+			// This adds significant load to redis.
+			time.Sleep(time.Second)
+			<-p.sema // release token
 			return
 		case err != nil:
 			if p.errLogLimiter.Allow() {
 				p.logger.Errorf("Dequeue error: %v", err)
 			}
+			<-p.sema // release token
 			return
 		}
 
-		select {
-		case <-p.abort:
-			// shutdown is starting, return immediately after requeuing the message.
-			p.requeue(msg)
-			return
-		case p.sema <- struct{}{}: // acquire token
-			p.ss.AddWorkerStats(msg, time.Now())
-			go func() {
-				defer func() {
-					p.ss.DeleteWorkerStats(msg)
-					<-p.sema // release token
-				}()
+		lease := base.NewLease(leaseExpirationTime)
+		deadline := p.computeDeadline(msg)
+		p.starting <- &workerInfo{msg, time.Now(), deadline, lease}
+		go func() {
+			defer func() {
+				p.finished <- msg
+				<-p.sema // release token
+			}()
 
-				ctx, cancel := createContext(msg)
-				p.cancelations.Add(msg.ID.String(), cancel)
-				defer func() {
-					cancel()
-					p.cancelations.Delete(msg.ID.String())
-				}()
+			ctx, cancel := asynqcontext.New(p.baseCtxFn(), msg, deadline)
+			p.cancelations.Add(msg.ID, cancel)
+			defer func() {
+				cancel()
+				p.cancelations.Delete(msg.ID)
+			}()
+
+			// check context before starting a worker goroutine.
+			select {
+			case <-ctx.Done():
+				// already canceled (e.g. deadline exceeded).
+				p.handleFailedMessage(ctx, lease, msg, ctx.Err())
+				return
+			default:
+			}
 
-				resCh := make(chan error, 1)
-				task := NewTask(msg.Type, msg.Payload)
-				go func() { resCh <- perform(ctx, task, p.handler) }()
+			resCh := make(chan error, 1)
+			go func() {
+				task := newTask(
+					msg.Type,
+					msg.Payload,
+					&ResultWriter{
+						id:     msg.ID,
+						qname:  msg.Queue,
+						broker: p.broker,
+						ctx:    ctx,
+					},
+				)
+				resCh <- p.perform(ctx, task)
+			}()
 
-				select {
-				case <-p.quit:
-					// time is up, quit this worker goroutine.
-					p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
-					return
-				case resErr := <-resCh:
-					// Note: One of three things should happen.
-					// 1) Done -> Removes the message from InProgress
-					// 2) Retry -> Removes the message from InProgress & Adds the message to Retry
-					// 3) Kill -> Removes the message from InProgress & Adds the message to Dead
-					if resErr != nil {
-						if p.errHandler != nil {
-							p.errHandler.HandleError(task, resErr, msg.Retried, msg.Retry)
-						}
-						if msg.Retried >= msg.Retry {
-							p.kill(msg, resErr)
-						} else {
-							p.retry(msg, resErr)
-						}
-						return
-					}
-					p.markAsDone(msg)
-				}
-			}()
-		}
+			select {
+			case <-p.abort:
+				// time is up, push the message back to queue and quit this worker goroutine.
+				p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
+				p.requeue(lease, msg)
+				return
+			case <-lease.Done():
+				cancel()
+				p.handleFailedMessage(ctx, lease, msg, ErrLeaseExpired)
+				return
+			case <-ctx.Done():
+				p.handleFailedMessage(ctx, lease, msg, ctx.Err())
+				return
+			case resErr := <-resCh:
+				if resErr != nil {
+					p.handleFailedMessage(ctx, lease, msg, resErr)
+					return
+				}
+				p.handleSucceededMessage(lease, msg)
+			}
+		}()
+	}
 }
 
-// restore moves all tasks from "in-progress" back to queue
-// to restore all unfinished tasks.
-func (p *processor) restore() {
-	n, err := p.broker.RequeueAll()
-	if err != nil {
-		p.logger.Errorf("Could not restore unfinished tasks: %v", err)
-	}
-	if n > 0 {
-		p.logger.Infof("Restored %d unfinished tasks back to queue", n)
-	}
-}
-
-func (p *processor) requeue(msg *base.TaskMessage) {
-	err := p.broker.Requeue(msg)
+func (p *processor) requeue(l *base.Lease, msg *base.TaskMessage) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
+	err := p.broker.Requeue(ctx, msg)
 	if err != nil {
 		p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err)
+	} else {
+		p.logger.Infof("Pushed task id=%s back to queue", msg.ID)
 	}
 }
 
-func (p *processor) markAsDone(msg *base.TaskMessage) {
-	err := p.broker.Done(msg)
+func (p *processor) handleSucceededMessage(l *base.Lease, msg *base.TaskMessage) {
+	if msg.Retention > 0 {
+		p.markAsComplete(l, msg)
+	} else {
+		p.markAsDone(l, msg)
+	}
+}
+
+func (p *processor) markAsComplete(l *base.Lease, msg *base.TaskMessage) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
+	err := p.broker.MarkAsComplete(ctx, msg)
 	if err != nil {
-		errMsg := fmt.Sprintf("Could not remove task id=%s from %q", msg.ID, base.InProgressQueue)
+		errMsg := fmt.Sprintf("Could not move task id=%s type=%q from %q to %q: %+v",
+			msg.ID, msg.Type, base.ActiveKey(msg.Queue), base.CompletedKey(msg.Queue), err)
 		p.logger.Warnf("%s; Will retry syncing", errMsg)
 		p.syncRequestCh <- &syncRequest{
 			fn: func() error {
-				return p.broker.Done(msg)
+				return p.broker.MarkAsComplete(ctx, msg)
 			},
-			errMsg: errMsg,
+			errMsg:   errMsg,
+			deadline: l.Deadline(),
 		}
 	}
 }
 
-func (p *processor) retry(msg *base.TaskMessage, e error) {
+func (p *processor) markAsDone(l *base.Lease, msg *base.TaskMessage) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
+	err := p.broker.Done(ctx, msg)
+	if err != nil {
+		errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
+		p.logger.Warnf("%s; Will retry syncing", errMsg)
+		p.syncRequestCh <- &syncRequest{
+			fn: func() error {
+				return p.broker.Done(ctx, msg)
+			},
+			errMsg:   errMsg,
+			deadline: l.Deadline(),
+		}
+	}
+}
+
+// SkipRetry is used as a return value from Handler.ProcessTask to indicate that
+// the task should not be retried and should be archived instead.
+var SkipRetry = errors.New("skip retry for the task")
+
+func (p *processor) handleFailedMessage(ctx context.Context, l *base.Lease, msg *base.TaskMessage, err error) {
+	if p.errHandler != nil {
+		p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
+	}
+	if !p.isFailureFunc(err) {
+		// retry the task without marking it as failed
+		p.retry(l, msg, err, false /*isFailure*/)
+		return
+	}
+	if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) {
+		p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
+		p.archive(l, msg, err)
+	} else {
+		p.retry(l, msg, err, true /*isFailure*/)
+	}
+}
+
+func (p *processor) retry(l *base.Lease, msg *base.TaskMessage, e error, isFailure bool) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
 	d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
 	retryAt := time.Now().Add(d)
-	err := p.broker.Retry(msg, retryAt, e.Error())
+	err := p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
 	if err != nil {
-		errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.InProgressQueue, base.RetryQueue)
+		errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
 		p.logger.Warnf("%s; Will retry syncing", errMsg)
 		p.syncRequestCh <- &syncRequest{
 			fn: func() error {
-				return p.broker.Retry(msg, retryAt, e.Error())
+				return p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
 			},
-			errMsg: errMsg,
+			errMsg:   errMsg,
+			deadline: l.Deadline(),
 		}
 	}
 }
 
-func (p *processor) kill(msg *base.TaskMessage, e error) {
-	p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
-	err := p.broker.Kill(msg, e.Error())
+func (p *processor) archive(l *base.Lease, msg *base.TaskMessage, e error) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
+	err := p.broker.Archive(ctx, msg, e.Error())
 	if err != nil {
-		errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.InProgressQueue, base.DeadQueue)
+		errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue))
 		p.logger.Warnf("%s; Will retry syncing", errMsg)
 		p.syncRequestCh <- &syncRequest{
 			fn: func() error {
-				return p.broker.Kill(msg, e.Error())
+				return p.broker.Archive(ctx, msg, e.Error())
 			},
-			errMsg: errMsg,
+			errMsg:   errMsg,
+			deadline: l.Deadline(),
 		}
 	}
 }
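base.Lease is internal to the library and its implementation is not part of this diff. Judging only from the calls above (NewLease, IsValid, Deadline, Done), it behaves roughly like the standalone sketch below. Note that this fixed-expiration version is a simplification: in the real server the lease is presumably extended while the heartbeater keeps renewing it.

package main

import (
	"fmt"
	"sync"
	"time"
)

// lease is a hypothetical stand-in for the internal base.Lease: writes to
// redis are allowed only while the lease is still valid, and Done() fires
// once it has expired.
type lease struct {
	once     sync.Once
	ch       chan struct{}
	expireAt time.Time
}

func newLease(expireAt time.Time) *lease {
	l := &lease{ch: make(chan struct{}), expireAt: expireAt}
	time.AfterFunc(time.Until(expireAt), func() {
		l.once.Do(func() { close(l.ch) })
	})
	return l
}

func (l *lease) IsValid() bool         { return time.Now().Before(l.expireAt) }
func (l *lease) Deadline() time.Time   { return l.expireAt }
func (l *lease) Done() <-chan struct{} { return l.ch }

func main() {
	l := newLease(time.Now().Add(50 * time.Millisecond))
	fmt.Println("valid?", l.IsValid()) // true
	<-l.Done()
	fmt.Println("valid?", l.IsValid()) // false
}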
@@ -322,13 +410,26 @@ func (p *processor) queues() []string {
 // perform calls the handler with the given task.
 // If the call returns without panic, it simply returns the value,
 // otherwise, it recovers from panic and returns an error.
-func perform(ctx context.Context, task *Task, h Handler) (err error) {
+func (p *processor) perform(ctx context.Context, task *Task) (err error) {
 	defer func() {
 		if x := recover(); x != nil {
+			p.logger.Errorf("recovering from panic. See the stack trace below for details:\n%s", string(debug.Stack()))
+			_, file, line, ok := runtime.Caller(1) // skip the first frame (panic itself)
+			if ok && strings.Contains(file, "runtime/") {
+				// The panic came from the runtime, most likely due to incorrect
+				// map/slice usage. The parent frame should have the real trigger.
+				_, file, line, ok = runtime.Caller(2)
+			}
+
+			// Include the file and line number info in the error, if runtime.Caller returned ok.
+			if ok {
+				err = fmt.Errorf("panic [%s:%d]: %v", file, line, x)
+			} else {
 				err = fmt.Errorf("panic: %v", x)
+			}
 		}
 	}()
-	return h.ProcessTask(ctx, task)
+	return p.handler.ProcessTask(ctx, task)
 }
 
 // uniq dedupes elements and returns a slice of unique names of length l.
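The new perform converts handler panics into ordinary errors so that one bad task cannot crash the whole worker process; the failed task then flows through the normal retry/archive path. A minimal standalone sketch of that recover-to-error pattern:

package main

import (
	"fmt"
	"runtime/debug"
)

// safeCall runs f and converts a panic into a returned error.
func safeCall(f func()) (err error) {
	defer func() {
		if x := recover(); x != nil {
			// Capture the stack for diagnostics; perform additionally uses
			// runtime.Caller to pin down the offending file and line.
			err = fmt.Errorf("panic: %v\n%s", x, debug.Stack())
		}
	}()
	f()
	return nil
}

func main() {
	fmt.Println(safeCall(func() { panic("boom") })) // panic: boom, plus a stack trace
}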
@@ -374,16 +475,15 @@ func (x byPriority) Len() int { return len(x) }
 func (x byPriority) Less(i, j int) bool { return x[i].priority < x[j].priority }
 func (x byPriority) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
 
-// normalizeQueueCfg divides priority numbers by their
-// greatest common divisor.
-func normalizeQueueCfg(queueCfg map[string]int) map[string]int {
+// normalizeQueues divides priority numbers by their greatest common divisor.
+func normalizeQueues(queues map[string]int) map[string]int {
 	var xs []int
-	for _, x := range queueCfg {
+	for _, x := range queues {
 		xs = append(xs, x)
 	}
 	d := gcd(xs...)
 	res := make(map[string]int)
-	for q, x := range queueCfg {
+	for q, x := range queues {
 		res[q] = x / d
 	}
 	return res
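A quick worked example of the normalization above: priorities {default: 6, critical: 3, low: 3} share a gcd of 3 and normalize to {2, 1, 1}, so the processor checks the default queue twice for every check of each of the others. A standalone sketch (the two-argument gcd here is a local stand-in for the variadic helper in processor.go):

package main

import "fmt"

// gcd of two non-negative ints; gcd(0, x) == x.
func gcd(a, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	queues := map[string]int{"default": 6, "critical": 3, "low": 3}
	d := 0
	for _, x := range queues {
		d = gcd(d, x)
	}
	normalized := make(map[string]int)
	for q, x := range queues {
		normalized[q] = x / d
	}
	fmt.Println(normalized) // map[critical:1 default:2 low:1]
}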
@@ -405,3 +505,19 @@ func gcd(xs ...int) int {
 	}
 	return res
 }
+
+// computeDeadline returns the given task's deadline.
+func (p *processor) computeDeadline(msg *base.TaskMessage) time.Time {
+	if msg.Timeout == 0 && msg.Deadline == 0 {
+		p.logger.Errorf("asynq: internal error: both timeout and deadline are not set for the task message: %s", msg.ID)
+		return p.clock.Now().Add(defaultTimeout)
+	}
+	if msg.Timeout != 0 && msg.Deadline != 0 {
+		deadlineUnix := math.Min(float64(p.clock.Now().Unix()+msg.Timeout), float64(msg.Deadline))
+		return time.Unix(int64(deadlineUnix), 0)
+	}
+	if msg.Timeout != 0 {
+		return p.clock.Now().Add(time.Duration(msg.Timeout) * time.Second)
+	}
+	return time.Unix(msg.Deadline, 0)
+}
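A worked example of the deadline rules above, with illustrative values: when both fields are set, the earlier of now+Timeout and the absolute Deadline wins.

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	now := time.Now()
	timeout := int64(30)                         // msg.Timeout: 30 seconds
	deadline := now.Add(10 * time.Second).Unix() // msg.Deadline: 10s from now

	// Both set: pick the earlier of now+timeout and the absolute deadline.
	d := int64(math.Min(float64(now.Unix()+timeout), float64(deadline)))
	fmt.Println(time.Unix(d, 0).Sub(now).Round(time.Second)) // ~10s: the deadline wins
}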
processor_test.go
@@ -6,6 +6,7 @@ package asynq
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"sort"
 	"sync"
@@ -16,17 +17,79 @@ import (
 	"github.com/google/go-cmp/cmp/cmpopts"
 	h "github.com/hibiken/asynq/internal/asynqtest"
 	"github.com/hibiken/asynq/internal/base"
+	"github.com/hibiken/asynq/internal/errors"
+	"github.com/hibiken/asynq/internal/log"
 	"github.com/hibiken/asynq/internal/rdb"
+	"github.com/hibiken/asynq/internal/timeutil"
 )
 
-func TestProcessorSuccess(t *testing.T) {
+var taskCmpOpts = []cmp.Option{
+	sortTaskOpt,                               // sort the tasks
+	cmp.AllowUnexported(Task{}),               // allow typename, payload fields to be compared
+	cmpopts.IgnoreFields(Task{}, "opts", "w"), // ignore opts, w fields
+}
+
+// fakeHeartbeater receives from the starting and finished channels and does nothing.
+func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) {
+	for {
+		select {
+		case <-starting:
+		case <-finished:
+		case <-done:
+			return
+		}
+	}
+}
+
+// fakeSyncer receives from the sync channel and does nothing.
+func fakeSyncer(syncCh <-chan *syncRequest, done <-chan struct{}) {
+	for {
+		select {
+		case <-syncCh:
+		case <-done:
+			return
+		}
+	}
+}
+
+// Returns a processor instance configured for testing purposes.
+func newProcessorForTest(t *testing.T, r *rdb.RDB, h Handler) *processor {
+	starting := make(chan *workerInfo)
+	finished := make(chan *base.TaskMessage)
+	syncCh := make(chan *syncRequest)
+	done := make(chan struct{})
+	t.Cleanup(func() { close(done) })
+	go fakeHeartbeater(starting, finished, done)
+	go fakeSyncer(syncCh, done)
+	p := newProcessor(processorParams{
+		logger:          testLogger,
+		broker:          r,
+		baseCtxFn:       context.Background,
+		retryDelayFunc:  DefaultRetryDelayFunc,
+		isFailureFunc:   defaultIsFailureFunc,
+		syncCh:          syncCh,
+		cancelations:    base.NewCancelations(),
+		concurrency:     10,
+		queues:          defaultQueueConfig,
+		strictPriority:  false,
+		errHandler:      nil,
+		shutdownTimeout: defaultShutdownTimeout,
+		starting:        starting,
+		finished:        finished,
+	})
+	p.handler = h
+	return p
+}
+
+func TestProcessorSuccessWithSingleQueue(t *testing.T) {
 	r := setup(t)
+	defer r.Close()
 	rdbClient := rdb.NewRDB(r)
 
-	m1 := h.NewTaskMessage("send_email", nil)
-	m2 := h.NewTaskMessage("gen_thumbnail", nil)
-	m3 := h.NewTaskMessage("reindex", nil)
-	m4 := h.NewTaskMessage("sync", nil)
+	m1 := h.NewTaskMessage("task1", nil)
+	m2 := h.NewTaskMessage("task2", nil)
+	m3 := h.NewTaskMessage("task3", nil)
+	m4 := h.NewTaskMessage("task4", nil)
 
 	t1 := NewTask(m1.Type, m1.Payload)
 	t2 := NewTask(m2.Type, m2.Payload)
@@ -34,17 +97,17 @@ func TestProcessorSuccess(t *testing.T) {
 	t4 := NewTask(m4.Type, m4.Payload)
 
 	tests := []struct {
-		enqueued      []*base.TaskMessage // initial default queue state
+		pending       []*base.TaskMessage // initial default queue state
 		incoming      []*base.TaskMessage // tasks to be enqueued during run
 		wantProcessed []*Task             // tasks to be processed at the end
 	}{
 		{
-			enqueued:      []*base.TaskMessage{m1},
+			pending:       []*base.TaskMessage{m1},
 			incoming:      []*base.TaskMessage{m2, m3, m4},
 			wantProcessed: []*Task{t1, t2, t3, t4},
 		},
 		{
-			enqueued:      []*base.TaskMessage{},
+			pending:       []*base.TaskMessage{},
 			incoming:      []*base.TaskMessage{m1},
 			wantProcessed: []*Task{t1},
 		},
@@ -52,7 +115,7 @@ func TestProcessorSuccess(t *testing.T) {
 
 	for _, tc := range tests {
 		h.FlushDB(t, r) // clean up db before each test case.
-		h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.
+		h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName) // initialize default queue.
 
 		// instantiate a new processor
 		var mu sync.Mutex
@@ -63,42 +126,163 @@ func TestProcessorSuccess(t *testing.T) {
             processed = append(processed, task)
             return nil
         }
-        ss := base.NewServerState("localhost", 1234, 10, defaultQueueConfig, false)
-        p := newProcessor(processorParams{
-            logger:          testLogger,
-            broker:          rdbClient,
-            ss:              ss,
-            retryDelayFunc:  defaultDelayFunc,
-            syncCh:          nil,
-            cancelations:    base.NewCancelations(),
-            errHandler:      nil,
-            shutdownTimeout: defaultShutdownTimeout,
-        })
-        p.handler = HandlerFunc(handler)
+        p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))

         p.start(&sync.WaitGroup{})
         for _, msg := range tc.incoming {
-            err := rdbClient.Enqueue(msg)
+            err := rdbClient.Enqueue(context.Background(), msg)
             if err != nil {
-                p.terminate()
+                p.shutdown()
                 t.Fatal(err)
             }
         }
-        time.Sleep(time.Second) // wait for one second to allow all enqueued tasks to be processed.
-        p.terminate()
+        time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
+        if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
+            t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
+        }
+        p.shutdown()

-        if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Payload{})); diff != "" {
+        mu.Lock()
+        if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
             t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
         }
+        mu.Unlock()
-
-        if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
-            t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
-        }
     }
 }
+
+func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
+    var (
+        r         = setup(t)
+        rdbClient = rdb.NewRDB(r)
+
+        m1 = h.NewTaskMessage("task1", nil)
+        m2 = h.NewTaskMessage("task2", nil)
+        m3 = h.NewTaskMessageWithQueue("task3", nil, "high")
+        m4 = h.NewTaskMessageWithQueue("task4", nil, "low")
+
+        t1 = NewTask(m1.Type, m1.Payload)
+        t2 = NewTask(m2.Type, m2.Payload)
+        t3 = NewTask(m3.Type, m3.Payload)
+        t4 = NewTask(m4.Type, m4.Payload)
+    )
+    defer r.Close()
+
+    tests := []struct {
+        pending       map[string][]*base.TaskMessage
+        queues        []string // list of queues to consume the tasks from
+        wantProcessed []*Task  // tasks to be processed at the end
+    }{
+        {
+            pending: map[string][]*base.TaskMessage{
+                "default": {m1, m2},
+                "high":    {m3},
+                "low":     {m4},
+            },
+            queues:        []string{"default", "high", "low"},
+            wantProcessed: []*Task{t1, t2, t3, t4},
+        },
+    }
+
+    for _, tc := range tests {
+        // Set up test case.
+        h.FlushDB(t, r)
+        h.SeedAllPendingQueues(t, r, tc.pending)
+
+        // Instantiate a new processor.
+        var mu sync.Mutex
+        var processed []*Task
+        handler := func(ctx context.Context, task *Task) error {
+            mu.Lock()
+            defer mu.Unlock()
+            processed = append(processed, task)
+            return nil
+        }
+        p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
+        p.queueConfig = map[string]int{
+            "default": 2,
+            "high":    3,
+            "low":     1,
+        }
+
+        p.start(&sync.WaitGroup{})
+        // Wait for two seconds to allow all pending tasks to be processed.
+        time.Sleep(2 * time.Second)
+        // Make sure no messages are stuck in active list.
+        for _, qname := range tc.queues {
+            if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
+                t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
+            }
+        }
+        p.shutdown()
+
+        mu.Lock()
+        if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
+            t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
+        }
+        mu.Unlock()
+    }
+}
+
+// https://github.com/hibiken/asynq/issues/166
+func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
+    r := setup(t)
+    defer r.Close()
+    rdbClient := rdb.NewRDB(r)
+
+    m1 := h.NewTaskMessage("large_number", h.JSON(map[string]interface{}{"data": 111111111111111111}))
+    t1 := NewTask(m1.Type, m1.Payload)
+
+    tests := []struct {
+        pending       []*base.TaskMessage // initial default queue state
+        wantProcessed []*Task             // tasks to be processed at the end
+    }{
+        {
+            pending:       []*base.TaskMessage{m1},
+            wantProcessed: []*Task{t1},
+        },
+    }
+
+    for _, tc := range tests {
+        h.FlushDB(t, r)                                             // clean up db before each test case.
+        h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName) // initialize default queue.
+
+        var mu sync.Mutex
+        var processed []*Task
+        handler := func(ctx context.Context, task *Task) error {
+            mu.Lock()
+            defer mu.Unlock()
+            var payload map[string]int
+            if err := json.Unmarshal(task.Payload(), &payload); err != nil {
+                t.Errorf("could not decode payload: %v", err)
+            }
+            if data, ok := payload["data"]; ok {
+                t.Logf("data == %d", data)
+            } else {
+                t.Errorf("could not get data from payload")
+            }
+            processed = append(processed, task)
+            return nil
+        }
+        p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
+
+        p.start(&sync.WaitGroup{})
+        time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
+        if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
+            t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
+        }
+        p.shutdown()
+
+        mu.Lock()
+        if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
+            t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
+        }
+        mu.Unlock()
+    }
 }

 func TestProcessorRetry(t *testing.T) {
     r := setup(t)
+    defer r.Close()
     rdbClient := rdb.NewRDB(r)

     m1 := h.NewTaskMessage("send_email", nil)
@@ -108,52 +292,63 @@ func TestProcessorRetry(t *testing.T) {
     m4 := h.NewTaskMessage("sync", nil)

     errMsg := "something went wrong"
-    // r* is m* after retry
-    r1 := *m1
-    r1.ErrorMsg = errMsg
-    r2 := *m2
-    r2.ErrorMsg = errMsg
-    r2.Retried = m2.Retried + 1
-    r3 := *m3
-    r3.ErrorMsg = errMsg
-    r3.Retried = m3.Retried + 1
-    r4 := *m4
-    r4.ErrorMsg = errMsg
-    r4.Retried = m4.Retried + 1
-
-    now := time.Now()
+    wrappedSkipRetry := fmt.Errorf("%s:%w", errMsg, SkipRetry)

     tests := []struct {
-        enqueued []*base.TaskMessage // initial default queue state
-        incoming []*base.TaskMessage // tasks to be enqueued during run
+        desc     string              // test description
+        pending  []*base.TaskMessage // initial default queue state
         delay    time.Duration       // retry delay duration
         handler  Handler             // task handler
         wait     time.Duration       // wait duration between starting and stopping processor for this test case
-        wantRetry    []h.ZSetEntry       // tasks in retry queue at the end
-        wantDead     []*base.TaskMessage // tasks in dead queue at the end
+        wantErrMsg   string              // error message the task should record
+        wantRetry    []*base.TaskMessage // tasks in retry queue at the end
+        wantArchived []*base.TaskMessage // tasks in archived queue at the end
         wantErrCount int                 // number of times error handler should be called
     }{
         {
-            enqueued: []*base.TaskMessage{m1, m2},
-            incoming: []*base.TaskMessage{m3, m4},
+            desc:    "Should automatically retry errored tasks",
+            pending: []*base.TaskMessage{m1, m2, m3, m4},
             delay:   time.Minute,
             handler: HandlerFunc(func(ctx context.Context, task *Task) error {
                 return fmt.Errorf(errMsg)
             }),
-            wait: time.Second,
-            wantRetry: []h.ZSetEntry{
-                {Msg: &r2, Score: float64(now.Add(time.Minute).Unix())},
-                {Msg: &r3, Score: float64(now.Add(time.Minute).Unix())},
-                {Msg: &r4, Score: float64(now.Add(time.Minute).Unix())},
-            },
-            wantDead:     []*base.TaskMessage{&r1},
+            wait:         2 * time.Second,
+            wantErrMsg:   errMsg,
+            wantRetry:    []*base.TaskMessage{m2, m3, m4},
+            wantArchived: []*base.TaskMessage{m1},
             wantErrCount: 4,
         },
+        {
+            desc:    "Should skip retry errored tasks",
+            pending: []*base.TaskMessage{m1, m2},
+            delay:   time.Minute,
+            handler: HandlerFunc(func(ctx context.Context, task *Task) error {
+                return SkipRetry // return SkipRetry without wrapping
+            }),
+            wait:         2 * time.Second,
+            wantErrMsg:   SkipRetry.Error(),
+            wantRetry:    []*base.TaskMessage{},
+            wantArchived: []*base.TaskMessage{m1, m2},
+            wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
+        },
+        {
+            desc:    "Should skip retry errored tasks (with error wrapping)",
+            pending: []*base.TaskMessage{m1, m2},
+            delay:   time.Minute,
+            handler: HandlerFunc(func(ctx context.Context, task *Task) error {
+                return wrappedSkipRetry
+            }),
+            wait:         2 * time.Second,
+            wantErrMsg:   wrappedSkipRetry.Error(),
+            wantRetry:    []*base.TaskMessage{},
+            wantArchived: []*base.TaskMessage{m1, m2},
+            wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
+        },
     }

     for _, tc := range tests {
         h.FlushDB(t, r) // clean up db before each test case.
-        h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.
+        h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName) // initialize default queue.

         // instantiate a new processor
         delayFunc := func(n int, e error, t *Task) time.Duration {
@@ -163,48 +358,49 @@ func TestProcessorRetry(t *testing.T) {
             mu sync.Mutex // guards n
             n  int        // number of times error handler is called
         )
-        errHandler := func(t *Task, err error, retried, maxRetry int) {
+        errHandler := func(ctx context.Context, t *Task, err error) {
             mu.Lock()
             defer mu.Unlock()
             n++
         }
-        ss := base.NewServerState("localhost", 1234, 10, defaultQueueConfig, false)
-        p := newProcessor(processorParams{
-            logger:          testLogger,
-            broker:          rdbClient,
-            ss:              ss,
-            retryDelayFunc:  delayFunc,
-            syncCh:          nil,
-            cancelations:    base.NewCancelations(),
-            errHandler:      ErrorHandlerFunc(errHandler),
-            shutdownTimeout: defaultShutdownTimeout,
-        })
-        p.handler = tc.handler
+        p := newProcessorForTest(t, rdbClient, tc.handler)
+        p.errHandler = ErrorHandlerFunc(errHandler)
+        p.retryDelayFunc = delayFunc

         p.start(&sync.WaitGroup{})
-        for _, msg := range tc.incoming {
-            err := rdbClient.Enqueue(msg)
-            if err != nil {
-                p.terminate()
-                t.Fatal(err)
-            }
-        }
-        time.Sleep(tc.wait)
-        p.terminate()
+        runTime := time.Now() // time when processor is running
+        time.Sleep(tc.wait)   // FIXME: This makes test flaky.
+        p.shutdown()

-        cmpOpt := cmpopts.EquateApprox(0, float64(time.Second)) // allow up to a second difference in zset score
-        gotRetry := h.GetRetryEntries(t, r)
-        if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
-            t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.RetryQueue, diff)
+        cmpOpt := h.EquateInt64Approx(int64(tc.wait.Seconds())) // allow up to tc.wait seconds of difference in zset score
+        gotRetry := h.GetRetryEntries(t, r, base.DefaultQueueName)
+        var wantRetry []base.Z // Note: construct wantRetry here since `LastFailedAt` and ZSCORE are relative to each test run.
+        for _, msg := range tc.wantRetry {
+            wantRetry = append(wantRetry,
+                base.Z{
+                    Message: h.TaskMessageAfterRetry(*msg, tc.wantErrMsg, runTime),
+                    Score:   runTime.Add(tc.delay).Unix(),
+                })
+        }
+        if diff := cmp.Diff(wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
+            t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.RetryKey(base.DefaultQueueName), diff)
         }

-        gotDead := h.GetDeadMessages(t, r)
-        if diff := cmp.Diff(tc.wantDead, gotDead, h.SortMsgOpt); diff != "" {
-            t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", base.DeadQueue, diff)
+        gotArchived := h.GetArchivedEntries(t, r, base.DefaultQueueName)
+        var wantArchived []base.Z // Note: construct wantArchived here since `LastFailedAt` and ZSCORE are relative to each test run.
+        for _, msg := range tc.wantArchived {
+            wantArchived = append(wantArchived,
+                base.Z{
+                    Message: h.TaskMessageWithError(*msg, tc.wantErrMsg, runTime),
+                    Score:   runTime.Unix(),
+                })
+        }
+        if diff := cmp.Diff(wantArchived, gotArchived, h.SortZSetEntryOpt, cmpOpt); diff != "" {
+            t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff)
         }

-        if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
-            t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
+        if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
+            t.Errorf("%s: %q has %d tasks, want 0", tc.desc, base.ActiveKey(base.DefaultQueueName), l)
         }

         if n != tc.wantErrCount {
@@ -213,6 +409,179 @@ func TestProcessorRetry(t *testing.T) {
         }
     }
 }
+
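The two new retry cases hinge on Go error wrapping: SkipRetry is detected whether it is returned directly or wrapped with %w, which implies the processor checks it with errors.Is. A minimal, hedged handler sketch (doWork is a hypothetical helper, not part of this diff):

// Sketch: opting out of retries from a handler. Wrapping with %w preserves the
// SkipRetry sentinel, so errors.Is(err, SkipRetry) still reports a match and
// the task is archived instead of retried.
func handleOnce(ctx context.Context, task *Task) error {
    if err := doWork(task); err != nil { // doWork is hypothetical
        return fmt.Errorf("permanent failure: %w", SkipRetry)
    }
    return nil
}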
+func TestProcessorMarkAsComplete(t *testing.T) {
+    r := setup(t)
+    defer r.Close()
+    rdbClient := rdb.NewRDB(r)
+
+    msg1 := h.NewTaskMessage("one", nil)
+    msg2 := h.NewTaskMessage("two", nil)
+    msg3 := h.NewTaskMessageWithQueue("three", nil, "custom")
+    msg1.Retention = 3600
+    msg3.Retention = 7200
+
+    handler := func(ctx context.Context, task *Task) error { return nil }
+
+    tests := []struct {
+        pending       map[string][]*base.TaskMessage
+        completed     map[string][]base.Z
+        queueCfg      map[string]int
+        wantPending   map[string][]*base.TaskMessage
+        wantCompleted func(completedAt time.Time) map[string][]base.Z
+    }{
+        {
+            pending: map[string][]*base.TaskMessage{
+                "default": {msg1, msg2},
+                "custom":  {msg3},
+            },
+            completed: map[string][]base.Z{
+                "default": {},
+                "custom":  {},
+            },
+            queueCfg: map[string]int{
+                "default": 1,
+                "custom":  1,
+            },
+            wantPending: map[string][]*base.TaskMessage{
+                "default": {},
+                "custom":  {},
+            },
+            wantCompleted: func(completedAt time.Time) map[string][]base.Z {
+                return map[string][]base.Z{
+                    "default": {{Message: h.TaskMessageWithCompletedAt(*msg1, completedAt), Score: completedAt.Unix() + msg1.Retention}},
+                    "custom":  {{Message: h.TaskMessageWithCompletedAt(*msg3, completedAt), Score: completedAt.Unix() + msg3.Retention}},
+                }
+            },
+        },
+    }
+
+    for _, tc := range tests {
+        h.FlushDB(t, r)
+        h.SeedAllPendingQueues(t, r, tc.pending)
+        h.SeedAllCompletedQueues(t, r, tc.completed)
+
+        p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
+        p.queueConfig = tc.queueCfg
+
+        p.start(&sync.WaitGroup{})
+        runTime := time.Now() // time when processor is running
+        time.Sleep(2 * time.Second)
+        p.shutdown()
+
+        for qname, want := range tc.wantPending {
+            gotPending := h.GetPendingMessages(t, r, qname)
+            if diff := cmp.Diff(want, gotPending, cmpopts.EquateEmpty()); diff != "" {
+                t.Errorf("diff found in %q pending set; want=%v, got=%v\n%s", qname, want, gotPending, diff)
+            }
+        }
+
+        for qname, want := range tc.wantCompleted(runTime) {
+            gotCompleted := h.GetCompletedEntries(t, r, qname)
+            if diff := cmp.Diff(want, gotCompleted, cmpopts.EquateEmpty()); diff != "" {
+                t.Errorf("diff found in %q completed set; want=%v, got=%v\n%s", qname, want, gotCompleted, diff)
+            }
+        }
+    }
+}
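The completed-set expectation above encodes the retention rule: the ZSET score of a completed entry is the completion time plus the task's Retention in seconds, i.e. the moment the entry becomes eligible for deletion. A small sketch of that arithmetic:

// completedScore mirrors the wantCompleted expectation above: a task completed
// at completedAt with retentionSec seconds of retention expires at this score.
func completedScore(completedAt time.Time, retentionSec int64) int64 {
    return completedAt.Unix() + retentionSec // e.g. msg1: completedAt + 3600
}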
+
+// Test a scenario where the worker server cannot communicate with redis due to a network failure
+// and the lease expires
+func TestProcessorWithExpiredLease(t *testing.T) {
+    r := setup(t)
+    defer r.Close()
+    rdbClient := rdb.NewRDB(r)
+
+    m1 := h.NewTaskMessage("task1", nil)
+
+    tests := []struct {
+        pending      []*base.TaskMessage
+        handler      Handler
+        wantErrCount int
+    }{
+        {
+            pending: []*base.TaskMessage{m1},
+            handler: HandlerFunc(func(ctx context.Context, task *Task) error {
+                // make sure the task processing time exceeds lease duration
+                // to test expired lease.
+                time.Sleep(rdb.LeaseDuration + 10*time.Second)
+                return nil
+            }),
+            wantErrCount: 1, // ErrorHandler should still be called with ErrLeaseExpired
+        },
+    }
+
+    for _, tc := range tests {
+        h.FlushDB(t, r)
+        h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName)
+
+        starting := make(chan *workerInfo)
+        finished := make(chan *base.TaskMessage)
+        syncCh := make(chan *syncRequest)
+        done := make(chan struct{})
+        t.Cleanup(func() { close(done) })
+        // fake heartbeater which notifies lease expiration
+        go func() {
+            for {
+                select {
+                case w := <-starting:
+                    // simulate expiration by resetting to some time in the past
+                    w.lease.Reset(time.Now().Add(-5 * time.Second))
+                    if !w.lease.NotifyExpiration() {
+                        panic("Failed to notify lease expiration")
+                    }
+                case <-finished:
+                    // do nothing
+                case <-done:
+                    return
+                }
+            }
+        }()
+        go fakeSyncer(syncCh, done)
+        p := newProcessor(processorParams{
+            logger:          testLogger,
+            broker:          rdbClient,
+            baseCtxFn:       context.Background,
+            retryDelayFunc:  DefaultRetryDelayFunc,
+            isFailureFunc:   defaultIsFailureFunc,
+            syncCh:          syncCh,
+            cancelations:    base.NewCancelations(),
+            concurrency:     10,
+            queues:          defaultQueueConfig,
+            strictPriority:  false,
+            errHandler:      nil,
+            shutdownTimeout: defaultShutdownTimeout,
+            starting:        starting,
+            finished:        finished,
+        })
+        p.handler = tc.handler
+        var (
+            mu   sync.Mutex // guards n and errs
+            n    int        // number of times error handler is called
+            errs []error    // errors passed to error handler
+        )
+        p.errHandler = ErrorHandlerFunc(func(ctx context.Context, t *Task, err error) {
+            mu.Lock()
+            defer mu.Unlock()
+            n++
+            errs = append(errs, err)
+        })
+
+        p.start(&sync.WaitGroup{})
+        time.Sleep(4 * time.Second)
+        p.shutdown()
+
+        if n != tc.wantErrCount {
+            t.Errorf("Unexpected number of errors: got %d, want %d", n, tc.wantErrCount)
+            continue
+        }
+        for i := 0; i < tc.wantErrCount; i++ {
+            if !errors.Is(errs[i], ErrLeaseExpired) {
+                t.Errorf("Unexpected error was passed to ErrorHandler: got %v want %v", errs[i], ErrLeaseExpired)
+            }
+        }
+    }
+}
+
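The strict-priority test further down calls fakeHeartbeater, which this diff does not show; the inline goroutine in the expired-lease test above suggests its shape. A hedged sketch of what such a fake plausibly looks like (the real helper may differ):

// fakeHeartbeater keeps a processor from blocking on its starting/finished
// channels during tests; unlike the inline fake above, it never touches leases.
func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) {
    for {
        select {
        case <-starting:
        case <-finished:
        case <-done:
            return
        }
    }
}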
 func TestProcessorQueues(t *testing.T) {
     sortOpt := cmp.Transformer("SortStrings", func(in []string) []string {
         out := append([]string(nil), in...) // Copy input to avoid mutating it
@@ -241,17 +610,10 @@ func TestProcessorQueues(t *testing.T) {
     }

     for _, tc := range tests {
-        ss := base.NewServerState("localhost", 1234, 10, tc.queueCfg, false)
-        p := newProcessor(processorParams{
-            logger:          testLogger,
-            broker:          nil,
-            ss:              ss,
-            retryDelayFunc:  defaultDelayFunc,
-            syncCh:          nil,
-            cancelations:    base.NewCancelations(),
-            errHandler:      nil,
-            shutdownTimeout: defaultShutdownTimeout,
-        })
+        // Note: rdb and handler not needed for this test.
+        p := newProcessorForTest(t, nil, nil)
+        p.queueConfig = tc.queueCfg
         got := p.queues()
         if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
             t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s",
@@ -261,36 +623,42 @@ func TestProcessorQueues(t *testing.T) {
 }

 func TestProcessorWithStrictPriority(t *testing.T) {
-    r := setup(t)
-    rdbClient := rdb.NewRDB(r)
-
-    m1 := h.NewTaskMessage("send_email", nil)
-    m2 := h.NewTaskMessage("send_email", nil)
-    m3 := h.NewTaskMessage("send_email", nil)
-    m4 := h.NewTaskMessage("gen_thumbnail", nil)
-    m5 := h.NewTaskMessage("gen_thumbnail", nil)
-    m6 := h.NewTaskMessage("sync", nil)
-    m7 := h.NewTaskMessage("sync", nil)
-
-    t1 := NewTask(m1.Type, m1.Payload)
-    t2 := NewTask(m2.Type, m2.Payload)
-    t3 := NewTask(m3.Type, m3.Payload)
-    t4 := NewTask(m4.Type, m4.Payload)
-    t5 := NewTask(m5.Type, m5.Payload)
-    t6 := NewTask(m6.Type, m6.Payload)
-    t7 := NewTask(m7.Type, m7.Payload)
+    var (
+        r         = setup(t)
+        rdbClient = rdb.NewRDB(r)
+
+        m1 = h.NewTaskMessageWithQueue("task1", nil, "critical")
+        m2 = h.NewTaskMessageWithQueue("task2", nil, "critical")
+        m3 = h.NewTaskMessageWithQueue("task3", nil, "critical")
+        m4 = h.NewTaskMessageWithQueue("task4", nil, base.DefaultQueueName)
+        m5 = h.NewTaskMessageWithQueue("task5", nil, base.DefaultQueueName)
+        m6 = h.NewTaskMessageWithQueue("task6", nil, "low")
+        m7 = h.NewTaskMessageWithQueue("task7", nil, "low")
+
+        t1 = NewTask(m1.Type, m1.Payload)
+        t2 = NewTask(m2.Type, m2.Payload)
+        t3 = NewTask(m3.Type, m3.Payload)
+        t4 = NewTask(m4.Type, m4.Payload)
+        t5 = NewTask(m5.Type, m5.Payload)
+        t6 = NewTask(m6.Type, m6.Payload)
+        t7 = NewTask(m7.Type, m7.Payload)
+    )
+    defer r.Close()

     tests := []struct {
-        enqueued      map[string][]*base.TaskMessage // initial queues state
+        pending       map[string][]*base.TaskMessage // initial queues state
+        queues        []string                       // list of queues to consume tasks from
         wait          time.Duration                  // wait duration between starting and stopping processor for this test case
         wantProcessed []*Task                        // tasks to be processed at the end
     }{
         {
-            enqueued: map[string][]*base.TaskMessage{
+            pending: map[string][]*base.TaskMessage{
                 base.DefaultQueueName: {m4, m5},
                 "critical":            {m1, m2, m3},
                 "low":                 {m6, m7},
             },
+            queues:        []string{base.DefaultQueueName, "critical", "low"},
             wait:          time.Second,
             wantProcessed: []*Task{t1, t2, t3, t4, t5, t6, t7},
         },
@@ -298,8 +666,8 @@ func TestProcessorWithStrictPriority(t *testing.T) {

     for _, tc := range tests {
         h.FlushDB(t, r) // clean up db before each test case.
-        for qname, msgs := range tc.enqueued {
-            h.SeedEnqueuedQueue(t, r, msgs, qname)
+        for qname, msgs := range tc.pending {
+            h.SeedPendingQueue(t, r, msgs, qname)
         }

         // instantiate a new processor
@@ -312,39 +680,53 @@ func TestProcessorWithStrictPriority(t *testing.T) {
             return nil
         }
         queueCfg := map[string]int{
-            "critical":            3,
             base.DefaultQueueName: 2,
+            "critical":            3,
             "low":                 1,
         }
-        // Note: Set concurrency to 1 to make sure tasks are processed one at a time.
-        ss := base.NewServerState("localhost", 1234, 1 /* concurrency */, queueCfg, true /*strict*/)
+        starting := make(chan *workerInfo)
+        finished := make(chan *base.TaskMessage)
+        syncCh := make(chan *syncRequest)
+        done := make(chan struct{})
+        defer func() { close(done) }()
+        go fakeHeartbeater(starting, finished, done)
+        go fakeSyncer(syncCh, done)
         p := newProcessor(processorParams{
             logger:          testLogger,
             broker:          rdbClient,
-            ss:              ss,
-            retryDelayFunc:  defaultDelayFunc,
-            syncCh:          nil,
+            baseCtxFn:       context.Background,
+            retryDelayFunc:  DefaultRetryDelayFunc,
+            isFailureFunc:   defaultIsFailureFunc,
+            syncCh:          syncCh,
             cancelations:    base.NewCancelations(),
+            concurrency:     1, // Set concurrency to 1 to make sure tasks are processed one at a time.
+            queues:          queueCfg,
+            strictPriority:  true,
             errHandler:      nil,
             shutdownTimeout: defaultShutdownTimeout,
+            starting:        starting,
+            finished:        finished,
         })
         p.handler = HandlerFunc(handler)

         p.start(&sync.WaitGroup{})
         time.Sleep(tc.wait)
-        p.terminate()
+        // Make sure no tasks are stuck in active list.
+        for _, qname := range tc.queues {
+            if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
+                t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
+            }
+        }
+        p.shutdown()

-        if diff := cmp.Diff(tc.wantProcessed, processed, cmp.AllowUnexported(Payload{})); diff != "" {
+        if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
             t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
         }
-
-        if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
-            t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
-        }
     }
 }

-func TestPerform(t *testing.T) {
+func TestProcessorPerform(t *testing.T) {
     tests := []struct {
         desc    string
         handler HandlerFunc
@@ -356,7 +738,7 @@ func TestPerform(t *testing.T) {
             handler: func(ctx context.Context, t *Task) error {
                 return nil
             },
-            task:    NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
+            task:    NewTask("gen_thumbnail", h.JSON(map[string]interface{}{"src": "some/img/path"})),
             wantErr: false,
         },
         {
@@ -364,7 +746,7 @@ func TestPerform(t *testing.T) {
             handler: func(ctx context.Context, t *Task) error {
                 return fmt.Errorf("something went wrong")
             },
-            task:    NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
+            task:    NewTask("gen_thumbnail", h.JSON(map[string]interface{}{"src": "some/img/path"})),
             wantErr: true,
         },
         {
@@ -372,13 +754,17 @@ func TestPerform(t *testing.T) {
             handler: func(ctx context.Context, t *Task) error {
                 panic("something went terribly wrong")
             },
-            task:    NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}),
+            task:    NewTask("gen_thumbnail", h.JSON(map[string]interface{}{"src": "some/img/path"})),
             wantErr: true,
         },
     }
+    // Note: We don't need to fully initialize the processor since we are only
+    // testing the perform method.
+    p := newProcessorForTest(t, nil, nil)
+
     for _, tc := range tests {
-        got := perform(context.Background(), tc.task, tc.handler)
+        p.handler = tc.handler
+        got := p.perform(context.Background(), tc.task)
         if !tc.wantErr && got != nil {
             t.Errorf("%s: perform() = %v, want nil", tc.desc, got)
             continue
@@ -412,7 +798,7 @@ func TestGCD(t *testing.T) {
     }
 }

-func TestNormalizeQueueCfg(t *testing.T) {
+func TestNormalizeQueues(t *testing.T) {
     tests := []struct {
         input map[string]int
         want  map[string]int
@@ -462,10 +848,76 @@ func TestNormalizeQueueCfg(t *testing.T) {
     }

     for _, tc := range tests {
-        got := normalizeQueueCfg(tc.input)
+        got := normalizeQueues(tc.input)
         if diff := cmp.Diff(tc.want, got); diff != "" {
-            t.Errorf("normalizeQueueCfg(%v) = %v, want %v; (-want, +got):\n%s",
+            t.Errorf("normalizeQueues(%v) = %v, want %v; (-want, +got):\n%s",
                 tc.input, got, tc.want, diff)
         }
     }
 }
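Given the adjacent TestGCD, the renamed normalizeQueues presumably divides all queue weights by their greatest common divisor so that equivalent configurations compare equal. A sketch under that assumption (the real implementation may differ):

// normalizeQueuesSketch reduces weights by their GCD, e.g.
// {"default": 20, "low": 10} -> {"default": 2, "low": 1}. Assumed behavior.
func normalizeQueuesSketch(queues map[string]int) map[string]int {
    gcd := func(a, b int) int {
        for b != 0 {
            a, b = b, a%b
        }
        return a
    }
    d := 0
    for _, w := range queues {
        d = gcd(d, w) // gcd(0, w) == w seeds the fold
    }
    if d == 0 {
        return queues
    }
    out := make(map[string]int, len(queues))
    for q, w := range queues {
        out[q] = w / d
    }
    return out
}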
+
+func TestProcessorComputeDeadline(t *testing.T) {
+    now := time.Now()
+    p := processor{
+        logger: log.NewLogger(nil),
+        clock:  timeutil.NewSimulatedClock(now),
+    }
+
+    tests := []struct {
+        desc string
+        msg  *base.TaskMessage
+        want time.Time
+    }{
+        {
+            desc: "message with only timeout specified",
+            msg: &base.TaskMessage{
+                Timeout: int64((30 * time.Minute).Seconds()),
+            },
+            want: now.Add(30 * time.Minute),
+        },
+        {
+            desc: "message with only deadline specified",
+            msg: &base.TaskMessage{
+                Deadline: now.Add(24 * time.Hour).Unix(),
+            },
+            want: now.Add(24 * time.Hour),
+        },
+        {
+            desc: "message with both timeout and deadline set (now+timeout < deadline)",
+            msg: &base.TaskMessage{
+                Deadline: now.Add(24 * time.Hour).Unix(),
+                Timeout:  int64((30 * time.Minute).Seconds()),
+            },
+            want: now.Add(30 * time.Minute),
+        },
+        {
+            desc: "message with both timeout and deadline set (now+timeout > deadline)",
+            msg: &base.TaskMessage{
+                Deadline: now.Add(10 * time.Minute).Unix(),
+                Timeout:  int64((30 * time.Minute).Seconds()),
+            },
+            want: now.Add(10 * time.Minute),
+        },
+        {
+            desc: "message with both timeout and deadline set (now+timeout == deadline)",
+            msg: &base.TaskMessage{
+                Deadline: now.Add(30 * time.Minute).Unix(),
+                Timeout:  int64((30 * time.Minute).Seconds()),
+            },
+            want: now.Add(30 * time.Minute),
+        },
+        {
+            desc: "message without timeout and deadline",
+            msg:  &base.TaskMessage{},
+            want: now.Add(defaultTimeout),
+        },
+    }
+
+    for _, tc := range tests {
+        got := p.computeDeadline(tc.msg)
+        // Compare the Unix epoch with seconds granularity
+        if got.Unix() != tc.want.Unix() {
+            t.Errorf("%s: got=%v, want=%v", tc.desc, got.Unix(), tc.want.Unix())
+        }
+    }
+}
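The table above fully pins down the deadline rule: with both fields set, the earlier of now+Timeout and Deadline wins; with neither set, defaultTimeout applies. A sketch matching every test case (an assumed shape of the real method, derived only from these expectations):

// computeDeadlineSketch reproduces the expectations in the test above.
func computeDeadlineSketch(now time.Time, msg *base.TaskMessage) time.Time {
    if msg.Timeout == 0 && msg.Deadline == 0 {
        return now.Add(defaultTimeout) // neither set: fall back to the default
    }
    if msg.Timeout != 0 && msg.Deadline != 0 {
        timeoutAt := now.Add(time.Duration(msg.Timeout) * time.Second)
        deadline := time.Unix(msg.Deadline, 0)
        if timeoutAt.Before(deadline) {
            return timeoutAt // now+timeout < deadline
        }
        return deadline // now+timeout >= deadline
    }
    if msg.Timeout != 0 {
        return now.Add(time.Duration(msg.Timeout) * time.Second)
    }
    return time.Unix(msg.Deadline, 0)
}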
recoverer.go (new file, 113 lines)
@@ -0,0 +1,113 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package asynq
+
+import (
+    "context"
+    "sync"
+    "time"
+
+    "github.com/hibiken/asynq/internal/base"
+    "github.com/hibiken/asynq/internal/errors"
+    "github.com/hibiken/asynq/internal/log"
+)
+
+type recoverer struct {
+    logger         *log.Logger
+    broker         base.Broker
+    retryDelayFunc RetryDelayFunc
+    isFailureFunc  func(error) bool
+
+    // channel to communicate back to the long running "recoverer" goroutine.
+    done chan struct{}
+
+    // list of queues to check for expired leases.
+    queues []string
+
+    // poll interval.
+    interval time.Duration
+}
+
+type recovererParams struct {
+    logger         *log.Logger
+    broker         base.Broker
+    queues         []string
+    interval       time.Duration
+    retryDelayFunc RetryDelayFunc
+    isFailureFunc  func(error) bool
+}
+
+func newRecoverer(params recovererParams) *recoverer {
+    return &recoverer{
+        logger:         params.logger,
+        broker:         params.broker,
+        done:           make(chan struct{}),
+        queues:         params.queues,
+        interval:       params.interval,
+        retryDelayFunc: params.retryDelayFunc,
+        isFailureFunc:  params.isFailureFunc,
+    }
+}
+
+func (r *recoverer) shutdown() {
+    r.logger.Debug("Recoverer shutting down...")
+    // Signal the recoverer goroutine to stop polling.
+    r.done <- struct{}{}
+}
+
+func (r *recoverer) start(wg *sync.WaitGroup) {
+    wg.Add(1)
+    go func() {
+        defer wg.Done()
+        r.recover()
+        timer := time.NewTimer(r.interval)
+        for {
+            select {
+            case <-r.done:
+                r.logger.Debug("Recoverer done")
+                timer.Stop()
+                return
+            case <-timer.C:
+                r.recover()
+                timer.Reset(r.interval)
+            }
+        }
+    }()
+}
+
+// ErrLeaseExpired error indicates that the task failed because the worker working on the task
+// could not extend its lease due to missing heartbeats. The worker may have crashed or got cut off from the network.
+var ErrLeaseExpired = errors.New("asynq: task lease expired")
+
+func (r *recoverer) recover() {
+    // Get all tasks which have expired 30 seconds ago or earlier to accommodate a certain amount of clock skew.
+    cutoff := time.Now().Add(-30 * time.Second)
+    msgs, err := r.broker.ListLeaseExpired(cutoff, r.queues...)
+    if err != nil {
+        r.logger.Warn("recoverer: could not list lease expired tasks")
+        return
+    }
+    for _, msg := range msgs {
+        if msg.Retried >= msg.Retry {
+            r.archive(msg, ErrLeaseExpired)
+        } else {
+            r.retry(msg, ErrLeaseExpired)
+        }
+    }
+}
+
+func (r *recoverer) retry(msg *base.TaskMessage, err error) {
+    delay := r.retryDelayFunc(msg.Retried, err, NewTask(msg.Type, msg.Payload))
+    retryAt := time.Now().Add(delay)
+    if err := r.broker.Retry(context.Background(), msg, retryAt, err.Error(), r.isFailureFunc(err)); err != nil {
+        r.logger.Warnf("recoverer: could not retry lease expired task: %v", err)
+    }
+}
+
+func (r *recoverer) archive(msg *base.TaskMessage, err error) {
+    if err := r.broker.Archive(context.Background(), msg, err.Error()); err != nil {
+        r.logger.Warnf("recoverer: could not move task to archive: %v", err)
+    }
+}
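Lifecycle in brief, mirroring TestRecoverer in the file below: start hands the polling goroutine a WaitGroup, shutdown signals it over the done channel, and Wait reaps it. A minimal wiring sketch (values mirror the test; rdbClient and testLogger are assumed to exist as in the test file):

// Sketch: running a recoverer for a short window, then stopping it cleanly.
func runRecovererBriefly(rdbClient *rdb.RDB) {
    var wg sync.WaitGroup
    rec := newRecoverer(recovererParams{
        logger:         testLogger,
        broker:         rdbClient,
        queues:         []string{"default", "critical"},
        interval:       1 * time.Second,
        retryDelayFunc: func(n int, err error, task *Task) time.Duration { return 30 * time.Second },
        isFailureFunc:  defaultIsFailureFunc,
    })
    rec.start(&wg)
    time.Sleep(2 * time.Second) // let it poll at least once
    rec.shutdown()
    wg.Wait()
}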
recoverer_test.go (new file, 276 lines)
@@ -0,0 +1,276 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package asynq
+
+import (
+    "sync"
+    "testing"
+    "time"
+
+    "github.com/google/go-cmp/cmp"
+    h "github.com/hibiken/asynq/internal/asynqtest"
+    "github.com/hibiken/asynq/internal/base"
+    "github.com/hibiken/asynq/internal/rdb"
+)
+
+func TestRecoverer(t *testing.T) {
+    r := setup(t)
+    defer r.Close()
+    rdbClient := rdb.NewRDB(r)
+
+    t1 := h.NewTaskMessageWithQueue("task1", nil, "default")
+    t2 := h.NewTaskMessageWithQueue("task2", nil, "default")
+    t3 := h.NewTaskMessageWithQueue("task3", nil, "critical")
+    t4 := h.NewTaskMessageWithQueue("task4", nil, "default")
+    t4.Retried = t4.Retry // t4 has reached its max retry count
+
+    now := time.Now()
+
+    tests := []struct {
+        desc         string
+        active       map[string][]*base.TaskMessage
+        lease        map[string][]base.Z
+        retry        map[string][]base.Z
+        archived     map[string][]base.Z
+        wantActive   map[string][]*base.TaskMessage
+        wantLease    map[string][]base.Z
+        wantRetry    map[string][]*base.TaskMessage
+        wantArchived map[string][]*base.TaskMessage
+    }{
+        {
+            desc: "with one active task",
+            active: map[string][]*base.TaskMessage{
+                "default": {t1},
+            },
+            lease: map[string][]base.Z{
+                "default": {{Message: t1, Score: now.Add(-1 * time.Minute).Unix()}},
+            },
+            retry: map[string][]base.Z{
+                "default": {},
+            },
+            archived: map[string][]base.Z{
+                "default": {},
+            },
+            wantActive: map[string][]*base.TaskMessage{
+                "default": {},
+            },
+            wantLease: map[string][]base.Z{
+                "default": {},
+            },
+            wantRetry: map[string][]*base.TaskMessage{
+                "default": {t1},
+            },
+            wantArchived: map[string][]*base.TaskMessage{
+                "default": {},
+            },
+        },
+        {
+            desc: "with a task with max-retry reached",
+            active: map[string][]*base.TaskMessage{
+                "default":  {t4},
+                "critical": {},
+            },
+            lease: map[string][]base.Z{
+                "default":  {{Message: t4, Score: now.Add(-40 * time.Second).Unix()}},
+                "critical": {},
+            },
+            retry: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            archived: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            wantActive: map[string][]*base.TaskMessage{
+                "default":  {},
+                "critical": {},
+            },
+            wantLease: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            wantRetry: map[string][]*base.TaskMessage{
+                "default":  {},
+                "critical": {},
+            },
+            wantArchived: map[string][]*base.TaskMessage{
+                "default":  {t4},
+                "critical": {},
+            },
+        },
+        {
+            desc: "with multiple active tasks, and one expired",
+            active: map[string][]*base.TaskMessage{
+                "default":  {t1, t2},
+                "critical": {t3},
+            },
+            lease: map[string][]base.Z{
+                "default": {
+                    {Message: t1, Score: now.Add(-2 * time.Minute).Unix()},
+                    {Message: t2, Score: now.Add(20 * time.Second).Unix()},
+                },
+                "critical": {
+                    {Message: t3, Score: now.Add(20 * time.Second).Unix()},
+                },
+            },
+            retry: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            archived: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            wantActive: map[string][]*base.TaskMessage{
+                "default":  {t2},
+                "critical": {t3},
+            },
+            wantLease: map[string][]base.Z{
+                "default":  {{Message: t2, Score: now.Add(20 * time.Second).Unix()}},
+                "critical": {{Message: t3, Score: now.Add(20 * time.Second).Unix()}},
+            },
+            wantRetry: map[string][]*base.TaskMessage{
+                "default":  {t1},
+                "critical": {},
+            },
+            wantArchived: map[string][]*base.TaskMessage{
+                "default":  {},
+                "critical": {},
+            },
+        },
+        {
+            desc: "with multiple expired active tasks",
+            active: map[string][]*base.TaskMessage{
+                "default":  {t1, t2},
+                "critical": {t3},
+            },
+            lease: map[string][]base.Z{
+                "default": {
+                    {Message: t1, Score: now.Add(-1 * time.Minute).Unix()},
+                    {Message: t2, Score: now.Add(10 * time.Second).Unix()},
+                },
+                "critical": {
+                    {Message: t3, Score: now.Add(-1 * time.Minute).Unix()},
+                },
+            },
+            retry: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            archived: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            wantActive: map[string][]*base.TaskMessage{
+                "default":  {t2},
+                "critical": {},
+            },
+            wantLease: map[string][]base.Z{
+                "default": {{Message: t2, Score: now.Add(10 * time.Second).Unix()}},
+            },
+            wantRetry: map[string][]*base.TaskMessage{
+                "default":  {t1},
+                "critical": {t3},
+            },
+            wantArchived: map[string][]*base.TaskMessage{
+                "default":  {},
+                "critical": {},
+            },
+        },
+        {
+            desc: "with empty active queue",
+            active: map[string][]*base.TaskMessage{
+                "default":  {},
+                "critical": {},
+            },
+            lease: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            retry: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            archived: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            wantActive: map[string][]*base.TaskMessage{
+                "default":  {},
+                "critical": {},
+            },
+            wantLease: map[string][]base.Z{
+                "default":  {},
+                "critical": {},
+            },
+            wantRetry: map[string][]*base.TaskMessage{
+                "default":  {},
+                "critical": {},
+            },
+            wantArchived: map[string][]*base.TaskMessage{
+                "default":  {},
+                "critical": {},
+            },
+        },
+    }
+
+    for _, tc := range tests {
+        h.FlushDB(t, r)
+        h.SeedAllActiveQueues(t, r, tc.active)
+        h.SeedAllLease(t, r, tc.lease)
+        h.SeedAllRetryQueues(t, r, tc.retry)
+        h.SeedAllArchivedQueues(t, r, tc.archived)
+
+        recoverer := newRecoverer(recovererParams{
+            logger:         testLogger,
+            broker:         rdbClient,
+            queues:         []string{"default", "critical"},
+            interval:       1 * time.Second,
+            retryDelayFunc: func(n int, err error, task *Task) time.Duration { return 30 * time.Second },
+            isFailureFunc:  defaultIsFailureFunc,
+        })
+
+        var wg sync.WaitGroup
+        recoverer.start(&wg)
+        runTime := time.Now() // time when recoverer is running
+        time.Sleep(2 * time.Second)
+        recoverer.shutdown()
+
+        for qname, want := range tc.wantActive {
+            gotActive := h.GetActiveMessages(t, r, qname)
+            if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+                t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff)
+            }
+        }
+        for qname, want := range tc.wantLease {
+            gotLease := h.GetLeaseEntries(t, r, qname)
+            if diff := cmp.Diff(want, gotLease, h.SortZSetEntryOpt); diff != "" {
+                t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.LeaseKey(qname), diff)
+            }
+        }
+        cmpOpt := h.EquateInt64Approx(2) // allow up to two-second difference in `LastFailedAt`
+        for qname, msgs := range tc.wantRetry {
+            gotRetry := h.GetRetryMessages(t, r, qname)
+            var wantRetry []*base.TaskMessage // Note: construct messages here since `LastFailedAt` is relative to each test run
+            for _, msg := range msgs {
+                wantRetry = append(wantRetry, h.TaskMessageAfterRetry(*msg, ErrLeaseExpired.Error(), runTime))
+            }
+            if diff := cmp.Diff(wantRetry, gotRetry, h.SortMsgOpt, cmpOpt); diff != "" {
+                t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff)
+            }
+        }
+        for qname, msgs := range tc.wantArchived {
+            gotArchived := h.GetArchivedMessages(t, r, qname)
+            var wantArchived []*base.TaskMessage
+            for _, msg := range msgs {
+                wantArchived = append(wantArchived, h.TaskMessageWithError(*msg, ErrLeaseExpired.Error(), runTime))
+            }
+            if diff := cmp.Diff(wantArchived, gotArchived, h.SortMsgOpt, cmpOpt); diff != "" {
+                t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff)
+            }
+        }
+    }
+}
scheduler.go (modified, 300 lines)
@@ -5,73 +5,295 @@
 package asynq

 import (
+    "fmt"
+    "os"
     "sync"
     "time"
+
+    "github.com/go-redis/redis/v8"
+    "github.com/google/uuid"
     "github.com/hibiken/asynq/internal/base"
     "github.com/hibiken/asynq/internal/log"
+    "github.com/hibiken/asynq/internal/rdb"
+    "github.com/robfig/cron/v3"
 )

-type scheduler struct {
-    logger *log.Logger
-    broker base.Broker
-
-    // channel to communicate back to the long running "scheduler" goroutine.
+// A Scheduler kicks off tasks at regular intervals based on the user defined schedule.
+//
+// Schedulers are safe for concurrent use by multiple goroutines.
+type Scheduler struct {
+    id string
+
+    state *serverState
+
+    logger   *log.Logger
+    client   *Client
+    rdb      *rdb.RDB
+    cron     *cron.Cron
+    location *time.Location
     done chan struct{}
+    wg         sync.WaitGroup
+    errHandler func(task *Task, opts []Option, err error)

-    // poll interval on average
-    avgInterval time.Duration
-
-    // list of queues to move the tasks into.
-    qnames []string
+    // guards idmap
+    mu sync.Mutex
+    // idmap maps Scheduler's entry ID to cron.EntryID
+    // to avoid using cron.EntryID as the public API of
+    // the Scheduler.
+    idmap map[string]cron.EntryID
 }

-type schedulerParams struct {
-    logger   *log.Logger
-    broker   base.Broker
-    interval time.Duration
-    queues   map[string]int
-}
-
-func newScheduler(params schedulerParams) *scheduler {
-    var qnames []string
-    for q := range params.queues {
-        qnames = append(qnames, q)
-    }
-    return &scheduler{
-        logger:      params.logger,
-        broker:      params.broker,
-        done:        make(chan struct{}),
-        avgInterval: params.interval,
-        qnames:      qnames,
-    }
-}
+// NewScheduler returns a new Scheduler instance given the redis connection option.
+// The parameter opts is optional; defaults will be used if opts is nil.
+func NewScheduler(r RedisConnOpt, opts *SchedulerOpts) *Scheduler {
+    c, ok := r.MakeRedisClient().(redis.UniversalClient)
+    if !ok {
+        panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
+    }
+    if opts == nil {
+        opts = &SchedulerOpts{}
+    }
+
+    logger := log.NewLogger(opts.Logger)
+    loglevel := opts.LogLevel
+    if loglevel == level_unspecified {
+        loglevel = InfoLevel
+    }
+    logger.SetLevel(toInternalLogLevel(loglevel))
+
+    loc := opts.Location
+    if loc == nil {
+        loc = time.UTC
+    }
+
+    return &Scheduler{
+        id:         generateSchedulerID(),
+        state:      &serverState{value: srvStateNew},
+        logger:     logger,
+        client:     NewClient(r),
+        rdb:        rdb.NewRDB(c),
+        cron:       cron.New(cron.WithLocation(loc)),
+        location:   loc,
+        done:       make(chan struct{}),
+        errHandler: opts.EnqueueErrorHandler,
+        idmap:      make(map[string]cron.EntryID),
+    }
+}

-func (s *scheduler) terminate() {
-    s.logger.Debug("Scheduler shutting down...")
-    // Signal the scheduler goroutine to stop polling.
-    s.done <- struct{}{}
-}
+func generateSchedulerID() string {
+    host, err := os.Hostname()
+    if err != nil {
+        host = "unknown-host"
+    }
+    return fmt.Sprintf("%s:%d:%v", host, os.Getpid(), uuid.New())
+}

-// start starts the "scheduler" goroutine.
-func (s *scheduler) start(wg *sync.WaitGroup) {
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
+// SchedulerOpts specifies scheduler options.
+type SchedulerOpts struct {
+    // Logger specifies the logger used by the scheduler instance.
+    //
+    // If unset, the default logger is used.
+    Logger Logger
+
+    // LogLevel specifies the minimum log level to enable.
+    //
+    // If unset, InfoLevel is used by default.
+    LogLevel LogLevel
+
+    // Location specifies the time zone location.
+    //
+    // If unset, the UTC time zone (time.UTC) is used.
+    Location *time.Location
+
+    // EnqueueErrorHandler gets called when scheduler cannot enqueue a registered task
+    // due to an error.
+    EnqueueErrorHandler func(task *Task, opts []Option, err error)
+}
+
+// enqueueJob encapsulates the job of enqueuing a task and recording the event.
+type enqueueJob struct {
+    id         uuid.UUID
+    cronspec   string
+    task       *Task
+    opts       []Option
+    location   *time.Location
+    logger     *log.Logger
+    client     *Client
+    rdb        *rdb.RDB
+    errHandler func(task *Task, opts []Option, err error)
+}
+
+func (j *enqueueJob) Run() {
+    info, err := j.client.Enqueue(j.task, j.opts...)
+    if err != nil {
+        j.logger.Errorf("scheduler could not enqueue a task %+v: %v", j.task, err)
+        if j.errHandler != nil {
+            j.errHandler(j.task, j.opts, err)
+        }
+        return
+    }
+    j.logger.Debugf("scheduler enqueued a task: %+v", info)
+    event := &base.SchedulerEnqueueEvent{
+        TaskID:     info.ID,
+        EnqueuedAt: time.Now().In(j.location),
+    }
+    err = j.rdb.RecordSchedulerEnqueueEvent(j.id.String(), event)
+    if err != nil {
+        j.logger.Errorf("scheduler could not record enqueue event of enqueued task %+v: %v", j.task, err)
+    }
+}
+
+// Register registers a task to be enqueued on the given schedule specified by the cronspec.
+// It returns an ID of the newly registered entry.
+func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entryID string, err error) {
+    job := &enqueueJob{
+        id:         uuid.New(),
+        cronspec:   cronspec,
+        task:       task,
+        opts:       opts,
+        location:   s.location,
+        client:     s.client,
+        rdb:        s.rdb,
+        logger:     s.logger,
+        errHandler: s.errHandler,
+    }
+    cronID, err := s.cron.AddJob(cronspec, job)
+    if err != nil {
+        return "", err
+    }
+    s.mu.Lock()
+    s.idmap[job.id.String()] = cronID
+    s.mu.Unlock()
+    return job.id.String(), nil
+}
+
+// Unregister removes a registered entry by entry ID.
+// Unregister returns a non-nil error if no entries were found for the given entryID.
+func (s *Scheduler) Unregister(entryID string) error {
+    s.mu.Lock()
+    defer s.mu.Unlock()
+    cronID, ok := s.idmap[entryID]
+    if !ok {
+        return fmt.Errorf("asynq: no scheduler entry found")
+    }
+    delete(s.idmap, entryID)
+    s.cron.Remove(cronID)
+    return nil
+}
+
+// Run starts the scheduler until an os signal to exit the program is received.
+// It returns an error if the scheduler is already running or has been shutdown.
+func (s *Scheduler) Run() error {
+    if err := s.Start(); err != nil {
+        return err
+    }
+    s.waitForSignals()
+    s.Shutdown()
+    return nil
+}
|
|
||||||
|
// Start starts the scheduler.
|
||||||
|
// It returns an error if the scheduler is already running or has been shutdown.
|
||||||
|
func (s *Scheduler) Start() error {
|
||||||
|
if err := s.start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.logger.Info("Scheduler starting")
|
||||||
|
s.logger.Infof("Scheduler timezone is set to %v", s.location)
|
||||||
|
s.cron.Start()
|
||||||
|
s.wg.Add(1)
|
||||||
|
go s.runHeartbeater()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checks server state and returns an error if pre-condition is not met.
|
||||||
|
// Otherwise it sets the server state to active.
|
||||||
|
func (s *Scheduler) start() error {
|
||||||
|
s.state.mu.Lock()
|
||||||
|
defer s.state.mu.Unlock()
|
||||||
|
switch s.state.value {
|
||||||
|
case srvStateActive:
|
||||||
|
return fmt.Errorf("asynq: the scheduler is already running")
|
||||||
|
case srvStateClosed:
|
||||||
|
return fmt.Errorf("asynq: the scheduler has already been stopped")
|
||||||
|
}
|
||||||
|
s.state.value = srvStateActive
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown stops and shuts down the scheduler.
|
||||||
|
func (s *Scheduler) Shutdown() {
|
||||||
|
s.state.mu.Lock()
|
||||||
|
if s.state.value == srvStateNew || s.state.value == srvStateClosed {
|
||||||
|
// scheduler is not running, do nothing and return.
|
||||||
|
s.state.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.state.value = srvStateClosed
|
||||||
|
s.state.mu.Unlock()
|
||||||
|
|
||||||
|
s.logger.Info("Scheduler shutting down")
|
||||||
|
close(s.done) // signal heartbeater to stop
|
||||||
|
ctx := s.cron.Stop()
|
||||||
|
<-ctx.Done()
|
||||||
|
s.wg.Wait()
|
||||||
|
|
||||||
|
s.clearHistory()
|
||||||
|
s.client.Close()
|
||||||
|
s.rdb.Close()
|
||||||
|
s.logger.Info("Scheduler stopped")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Scheduler) runHeartbeater() {
|
||||||
|
defer s.wg.Done()
|
||||||
|
ticker := time.NewTicker(5 * time.Second)
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-s.done:
|
case <-s.done:
|
||||||
s.logger.Debug("Scheduler done")
|
s.logger.Debugf("Scheduler heatbeater shutting down")
|
||||||
|
s.rdb.ClearSchedulerEntries(s.id)
|
||||||
return
|
return
|
||||||
case <-time.After(s.avgInterval):
|
case <-ticker.C:
|
||||||
s.exec()
|
s.beat()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *scheduler) exec() {
|
// beat writes a snapshot of entries to redis.
|
||||||
if err := s.broker.CheckAndEnqueue(s.qnames...); err != nil {
|
func (s *Scheduler) beat() {
|
||||||
s.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
|
var entries []*base.SchedulerEntry
|
||||||
|
for _, entry := range s.cron.Entries() {
|
||||||
|
job := entry.Job.(*enqueueJob)
|
||||||
|
e := &base.SchedulerEntry{
|
||||||
|
ID: job.id.String(),
|
||||||
|
Spec: job.cronspec,
|
||||||
|
Type: job.task.Type(),
|
||||||
|
Payload: job.task.Payload(),
|
||||||
|
Opts: stringifyOptions(job.opts),
|
||||||
|
Next: entry.Next,
|
||||||
|
Prev: entry.Prev,
|
||||||
|
}
|
||||||
|
entries = append(entries, e)
|
||||||
|
}
|
||||||
|
s.logger.Debugf("Writing entries %v", entries)
|
||||||
|
if err := s.rdb.WriteSchedulerEntries(s.id, entries, 5*time.Second); err != nil {
|
||||||
|
s.logger.Warnf("Scheduler could not write heartbeat data: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringifyOptions(opts []Option) []string {
|
||||||
|
var res []string
|
||||||
|
for _, opt := range opts {
|
||||||
|
res = append(res, opt.String())
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Scheduler) clearHistory() {
|
||||||
|
for _, entry := range s.cron.Entries() {
|
||||||
|
job := entry.Job.(*enqueueJob)
|
||||||
|
if err := s.rdb.ClearSchedulerHistory(job.id.String()); err != nil {
|
||||||
|
s.logger.Warnf("Could not clear scheduler history for entry %q: %v", job.id.String(), err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
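In application code, the cron-based Scheduler added above is driven roughly as follows. This is a minimal sketch, assuming a local Redis at localhost:6379 and an illustrative "cleanup" task; only NewScheduler, Register, and Run come from the API shown in this diff.

package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	// SchedulerOpts fields (Logger, LogLevel, Location, EnqueueErrorHandler)
	// are all optional, as documented in the struct above.
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: "localhost:6379"}, // assumed address
		&asynq.SchedulerOpts{},
	)

	// Register returns the entry ID, which can later be passed to Unregister.
	entryID, err := scheduler.Register("@every 30s", asynq.NewTask("cleanup", nil), asynq.MaxRetry(3))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("registered entry: %s", entryID)

	// Run blocks until a TERM or INT signal arrives, then calls Shutdown.
	if err := scheduler.Run(); err != nil {
		log.Fatal(err)
	}
}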
@@ -10,89 +10,147 @@ import (
 	"time"

 	"github.com/google/go-cmp/cmp"
-	h "github.com/hibiken/asynq/internal/asynqtest"
+	"github.com/hibiken/asynq/internal/asynqtest"
 	"github.com/hibiken/asynq/internal/base"
-	"github.com/hibiken/asynq/internal/rdb"
 )

-func TestScheduler(t *testing.T) {
-	r := setup(t)
-	rdbClient := rdb.NewRDB(r)
-	const pollInterval = time.Second
-	s := newScheduler(schedulerParams{
-		logger:   testLogger,
-		broker:   rdbClient,
-		interval: pollInterval,
-		queues:   defaultQueueConfig,
-	})
-	t1 := h.NewTaskMessage("gen_thumbnail", nil)
-	t2 := h.NewTaskMessage("send_email", nil)
-	t3 := h.NewTaskMessage("reindex", nil)
-	t4 := h.NewTaskMessage("sync", nil)
-	now := time.Now()
-
-	tests := []struct {
-		initScheduled []h.ZSetEntry       // scheduled queue initial state
-		initRetry     []h.ZSetEntry       // retry queue initial state
-		initQueue     []*base.TaskMessage // default queue initial state
-		wait          time.Duration       // wait duration before checking for final state
-		wantScheduled []*base.TaskMessage // schedule queue final state
-		wantRetry     []*base.TaskMessage // retry queue final state
-		wantQueue     []*base.TaskMessage // default queue final state
-	}{
-		{
-			initScheduled: []h.ZSetEntry{
-				{Msg: t1, Score: float64(now.Add(time.Hour).Unix())},
-				{Msg: t2, Score: float64(now.Add(-2 * time.Second).Unix())},
-			},
-			initRetry: []h.ZSetEntry{
-				{Msg: t3, Score: float64(time.Now().Add(-500 * time.Millisecond).Unix())},
-			},
-			initQueue:     []*base.TaskMessage{t4},
-			wait:          pollInterval * 2,
-			wantScheduled: []*base.TaskMessage{t1},
-			wantRetry:     []*base.TaskMessage{},
-			wantQueue:     []*base.TaskMessage{t2, t3, t4},
-		},
-		{
-			initScheduled: []h.ZSetEntry{
-				{Msg: t1, Score: float64(now.Unix())},
-				{Msg: t2, Score: float64(now.Add(-2 * time.Second).Unix())},
-				{Msg: t3, Score: float64(now.Add(-500 * time.Millisecond).Unix())},
-			},
-			initRetry:     []h.ZSetEntry{},
-			initQueue:     []*base.TaskMessage{t4},
-			wait:          pollInterval * 2,
-			wantScheduled: []*base.TaskMessage{},
-			wantRetry:     []*base.TaskMessage{},
-			wantQueue:     []*base.TaskMessage{t1, t2, t3, t4},
-		},
-	}
-
-	for _, tc := range tests {
-		h.FlushDB(t, r)                              // clean up db before each test case.
-		h.SeedScheduledQueue(t, r, tc.initScheduled) // initialize scheduled queue
-		h.SeedRetryQueue(t, r, tc.initRetry)         // initialize retry queue
-		h.SeedEnqueuedQueue(t, r, tc.initQueue)      // initialize default queue
-
-		var wg sync.WaitGroup
-		s.start(&wg)
-		time.Sleep(tc.wait)
-		s.terminate()
-
-		gotScheduled := h.GetScheduledMessages(t, r)
-		if diff := cmp.Diff(tc.wantScheduled, gotScheduled, h.SortMsgOpt); diff != "" {
-			t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.ScheduledQueue, diff)
-		}
-
-		gotRetry := h.GetRetryMessages(t, r)
-		if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortMsgOpt); diff != "" {
-			t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.RetryQueue, diff)
-		}
-
-		gotEnqueued := h.GetEnqueuedMessages(t, r)
-		if diff := cmp.Diff(tc.wantQueue, gotEnqueued, h.SortMsgOpt); diff != "" {
-			t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.DefaultQueue, diff)
-		}
-	}
-}
+func TestSchedulerRegister(t *testing.T) {
+	tests := []struct {
+		cronspec string
+		task     *Task
+		opts     []Option
+		wait     time.Duration
+		queue    string
+		want     []*base.TaskMessage
+	}{
+		{
+			cronspec: "@every 3s",
+			task:     NewTask("task1", nil),
+			opts:     []Option{MaxRetry(10)},
+			wait:     10 * time.Second,
+			queue:    "default",
+			want: []*base.TaskMessage{
+				{
+					Type:    "task1",
+					Payload: nil,
+					Retry:   10,
+					Timeout: int64(defaultTimeout.Seconds()),
+					Queue:   "default",
+				},
+				{
+					Type:    "task1",
+					Payload: nil,
+					Retry:   10,
+					Timeout: int64(defaultTimeout.Seconds()),
+					Queue:   "default",
+				},
+				{
+					Type:    "task1",
+					Payload: nil,
+					Retry:   10,
+					Timeout: int64(defaultTimeout.Seconds()),
+					Queue:   "default",
+				},
+			},
+		},
+	}
+
+	r := setup(t)
+
+	for _, tc := range tests {
+		scheduler := NewScheduler(getRedisConnOpt(t), nil)
+		if _, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...); err != nil {
+			t.Fatal(err)
+		}
+
+		if err := scheduler.Start(); err != nil {
+			t.Fatal(err)
+		}
+		time.Sleep(tc.wait)
+		scheduler.Shutdown()
+
+		got := asynqtest.GetPendingMessages(t, r, tc.queue)
+		if diff := cmp.Diff(tc.want, got, asynqtest.IgnoreIDOpt); diff != "" {
+			t.Errorf("mismatch found in queue %q: (-want,+got)\n%s", tc.queue, diff)
+		}
+	}
+}
+
+func TestSchedulerWhenRedisDown(t *testing.T) {
+	var (
+		mu      sync.Mutex
+		counter int
+	)
+	errorHandler := func(task *Task, opts []Option, err error) {
+		mu.Lock()
+		counter++
+		mu.Unlock()
+	}
+
+	// Connect to non-existent redis instance to simulate a redis server being down.
+	scheduler := NewScheduler(
+		RedisClientOpt{Addr: ":9876"},
+		&SchedulerOpts{EnqueueErrorHandler: errorHandler},
+	)
+
+	task := NewTask("test", nil)
+
+	if _, err := scheduler.Register("@every 3s", task); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := scheduler.Start(); err != nil {
+		t.Fatal(err)
+	}
+	// Scheduler should attempt to enqueue the task three times (every 3s).
+	time.Sleep(10 * time.Second)
+	scheduler.Shutdown()
+
+	mu.Lock()
+	if counter != 3 {
+		t.Errorf("EnqueueErrorHandler was called %d times, want 3", counter)
+	}
+	mu.Unlock()
+}
+
+func TestSchedulerUnregister(t *testing.T) {
+	tests := []struct {
+		cronspec string
+		task     *Task
+		opts     []Option
+		wait     time.Duration
+		queue    string
+	}{
+		{
+			cronspec: "@every 3s",
+			task:     NewTask("task1", nil),
+			opts:     []Option{MaxRetry(10)},
+			wait:     10 * time.Second,
+			queue:    "default",
+		},
+	}
+
+	r := setup(t)
+
+	for _, tc := range tests {
+		scheduler := NewScheduler(getRedisConnOpt(t), nil)
+		entryID, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err := scheduler.Unregister(entryID); err != nil {
+			t.Fatal(err)
+		}
+
+		if err := scheduler.Start(); err != nil {
+			t.Fatal(err)
+		}
+		time.Sleep(tc.wait)
+		scheduler.Shutdown()
+
+		got := asynqtest.GetPendingMessages(t, r, tc.queue)
+		if len(got) != 0 {
+			t.Errorf("%d tasks were enqueued, want zero", len(got))
+		}
+	}
+}
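TestSchedulerWhenRedisDown above exercises the EnqueueErrorHandler hook; application code wires it the same way. A hedged sketch, assuming a local Redis address and with logging as an arbitrary side effect:

package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: "localhost:6379"}, // assumed address
		&asynq.SchedulerOpts{
			// Called whenever an enqueue attempt fails, e.g. while redis is down.
			EnqueueErrorHandler: func(task *asynq.Task, opts []asynq.Option, err error) {
				log.Printf("could not enqueue %q: %v", task.Type(), err)
			},
		},
	)
	_ = scheduler // register entries and call Run as usual
}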
@@ -62,7 +62,7 @@ func (mux *ServeMux) Handler(t *Task) (h Handler, pattern string) {
 	mux.mu.RLock()
 	defer mux.mu.RUnlock()

-	h, pattern = mux.match(t.Type)
+	h, pattern = mux.match(t.Type())
 	if h == nil {
 		h, pattern = NotFoundHandler(), ""
 	}
@@ -98,7 +98,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
 	mux.mu.Lock()
 	defer mux.mu.Unlock()

-	if pattern == "" {
+	if strings.TrimSpace(pattern) == "" {
 		panic("asynq: invalid pattern")
 	}
 	if handler == nil {
@@ -151,7 +151,7 @@ func (mux *ServeMux) Use(mws ...MiddlewareFunc) {

 // NotFound returns an error indicating that the handler was not found for the given task.
 func NotFound(ctx context.Context, task *Task) error {
-	return fmt.Errorf("handler not found for task %q", task.Type)
+	return fmt.Errorf("handler not found for task %q", task.Type())
 }

 // NotFoundHandler returns a simple task handler that returns a ``not found`` error.
@@ -68,7 +68,7 @@ func TestServeMux(t *testing.T) {
 		}

 		if called != tc.want {
-			t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type, tc.want)
+			t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type(), tc.want)
 		}
 	}
 }
@@ -124,7 +124,7 @@ func TestServeMuxNotFound(t *testing.T) {
 		task := NewTask(tc.typename, nil)
 		err := mux.ProcessTask(context.Background(), task)
 		if err == nil {
-			t.Errorf("ProcessTask did not return error for task %q, should return 'not found' error", task.Type)
+			t.Errorf("ProcessTask did not return error for task %q, should return 'not found' error", task.Type())
 		}
 	}
 }
@@ -164,7 +164,7 @@ func TestServeMuxMiddlewares(t *testing.T) {
 		}

 		if called != tc.want {
-			t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type, tc.want)
+			t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type(), tc.want)
 		}
 	}
 }
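The t.Type → t.Type() edits above track the Task accessor becoming a method; the ServeMux routing surface itself is unchanged. A small sketch of the API these hunks touch, with a placeholder handler body:

package main

import (
	"context"

	"github.com/hibiken/asynq"
)

func main() {
	mux := asynq.NewServeMux()
	// Handle now panics on an all-whitespace pattern as well, per the
	// strings.TrimSpace check added above.
	mux.HandleFunc("email:send", func(ctx context.Context, t *asynq.Task) error {
		_ = t.Type() // Type() is a method call in this version
		return nil   // placeholder; real handlers do the work here
	})
	// Tasks with no matching pattern fall through to NotFoundHandler.
	_ = mux
}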
server.go (344 changed lines)
@@ -10,43 +10,83 @@ import (
 	"fmt"
 	"math"
 	"math/rand"
-	"os"
 	"runtime"
 	"strings"
 	"sync"
 	"time"

+	"github.com/go-redis/redis/v8"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/log"
 	"github.com/hibiken/asynq/internal/rdb"
 )

-// Server is responsible for managing the background-task processing.
+// Server is responsible for task processing and task lifecycle management.
 //
 // Server pulls tasks off queues and processes them.
-// If the processing of a task is unsuccessful, server will
-// schedule it for a retry.
+// If the processing of a task is unsuccessful, server will schedule it for a retry.
+//
 // A task will be retried until either the task gets processed successfully
 // or until it reaches its max retry count.
 //
-// If a task exhausts its retries, it will be moved to the "dead" queue and
-// will be kept in the queue for some time until a certain condition is met
-// (e.g., queue size reaches a certain limit, or the task has been in the
-// queue for a certain amount of time).
+// If a task exhausts its retries, it will be moved to the archive and
+// will be kept in the archive set.
+// Note that the archive size is finite and once it reaches its max size,
+// oldest tasks in the archive will be deleted.
 type Server struct {
-	ss *base.ServerState
-
 	logger *log.Logger

 	broker base.Broker

+	state *serverState
+
 	// wait group to wait for all goroutines to finish.
 	wg            sync.WaitGroup
-	scheduler     *scheduler
+	forwarder     *forwarder
 	processor     *processor
 	syncer        *syncer
 	heartbeater   *heartbeater
 	subscriber    *subscriber
+	recoverer     *recoverer
+	healthchecker *healthchecker
+	janitor       *janitor
+}
+
+type serverState struct {
+	mu    sync.Mutex
+	value serverStateValue
+}
+
+type serverStateValue int
+
+const (
+	// StateNew represents a new server. Server begins in
+	// this state and then transitions to StateActive when
+	// Start or Run is called.
+	srvStateNew serverStateValue = iota
+
+	// StateActive indicates the server is up and active.
+	srvStateActive
+
+	// StateStopped indicates the server is up but no longer processing new tasks.
+	srvStateStopped
+
+	// StateClosed indicates the server has been shutdown.
+	srvStateClosed
+)
+
+var serverStates = []string{
+	"new",
+	"active",
+	"stopped",
+	"closed",
+}
+
+func (s serverStateValue) String() string {
+	if srvStateNew <= s && s <= srvStateClosed {
+		return serverStates[s]
+	}
+	return "unknown status"
 }

 // Config specifies the server's background-task processing behavior.
@@ -54,17 +94,27 @@ type Config struct {
 	// Maximum number of concurrent processing of tasks.
 	//
 	// If set to a zero or negative value, NewServer will overwrite the value
-	// to the number of CPUs usable by the currennt process.
+	// to the number of CPUs usable by the current process.
 	Concurrency int

+	// BaseContext optionally specifies a function that returns the base context for Handler invocations on this server.
+	//
+	// If BaseContext is nil, the default is context.Background().
+	// If this is defined, then it MUST return a non-nil context.
+	BaseContext func() context.Context
+
 	// Function to calculate retry delay for a failed task.
 	//
 	// By default, it uses exponential backoff algorithm to calculate the delay.
+	RetryDelayFunc RetryDelayFunc
+
+	// Predicate function to determine whether the error returned from Handler is a failure.
+	// If the function returns false, Server will not increment the retried counter for the task,
+	// and Server won't record the queue stats (processed and failed stats) to avoid skewing the error
+	// rate of the queue.
 	//
-	// n is the number of times the task has been retried.
-	// e is the error returned by the task handler.
-	// t is the task in question.
-	RetryDelayFunc func(n int, e error, t *Task) time.Duration
+	// By default, if the given error is non-nil the function returns true.
+	IsFailure func(error) bool

 	// List of queues to process with given priority value. Keys are the names of the
 	// queues and values are associated priority value.
@@ -74,11 +124,13 @@ type Config struct {
 	// Priority is treated as follows to avoid starving low priority queues.
 	//
 	// Example:
+	//
 	//     Queues: map[string]int{
 	//         "critical": 6,
 	//         "default":  3,
 	//         "low":      1,
 	//     }
+	//
 	// With the above config and given that all queues are not empty, the tasks
 	// in "critical", "default", "low" should be processed 60%, 30%, 10% of
 	// the time respectively.
@@ -98,7 +150,10 @@ type Config struct {
 	// HandleError is invoked only if the task handler returns a non-nil error.
 	//
 	// Example:
-	//     func reportError(task *asynq.Task, err error, retried, maxRetry int) {
+	//
+	//     func reportError(ctx context.Context, task *asynq.Task, err error) {
+	//         retried, _ := asynq.GetRetryCount(ctx)
+	//         maxRetry, _ := asynq.GetMaxRetry(ctx)
 	//         if retried >= maxRetry {
 	//             err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
 	//         }
@@ -123,22 +178,45 @@ type Config struct {
 	//
 	// If unset or zero, default timeout of 8 seconds is used.
 	ShutdownTimeout time.Duration

+	// HealthCheckFunc is called periodically with any errors encountered during ping to the
+	// connected redis server.
+	HealthCheckFunc func(error)
+
+	// HealthCheckInterval specifies the interval between healthchecks.
+	//
+	// If unset or zero, the interval is set to 15 seconds.
+	HealthCheckInterval time.Duration
+
+	// DelayedTaskCheckInterval specifies the interval between checks run on 'scheduled' and 'retry'
+	// tasks, and forwarding them to 'pending' state if they are ready to be processed.
+	//
+	// If unset or zero, the interval is set to 5 seconds.
+	DelayedTaskCheckInterval time.Duration
 }

-// An ErrorHandler handles errors returned by the task handler.
+// An ErrorHandler handles an error that occurred during task processing.
 type ErrorHandler interface {
-	HandleError(task *Task, err error, retried, maxRetry int)
+	HandleError(ctx context.Context, task *Task, err error)
 }

 // The ErrorHandlerFunc type is an adapter to allow the use of ordinary functions as a ErrorHandler.
 // If f is a function with the appropriate signature, ErrorHandlerFunc(f) is a ErrorHandler that calls f.
-type ErrorHandlerFunc func(task *Task, err error, retried, maxRetry int)
+type ErrorHandlerFunc func(ctx context.Context, task *Task, err error)

-// HandleError calls fn(task, err, retried, maxRetry)
-func (fn ErrorHandlerFunc) HandleError(task *Task, err error, retried, maxRetry int) {
-	fn(task, err, retried, maxRetry)
+// HandleError calls fn(ctx, task, err)
+func (fn ErrorHandlerFunc) HandleError(ctx context.Context, task *Task, err error) {
+	fn(ctx, task, err)
 }

+// RetryDelayFunc calculates the retry delay duration for a failed task given
+// the retry count, error, and the task.
+//
+// n is the number of times the task has been retried.
+// e is the error returned by the task handler.
+// t is the task in question.
+type RetryDelayFunc func(n int, e error, t *Task) time.Duration
+
 // Logger supports logging at various log levels.
 type Logger interface {
 	// Debug logs a message at Debug level.
|
|||||||
panic(fmt.Sprintf("asynq: unexpected log level: %v", l))
|
panic(fmt.Sprintf("asynq: unexpected log level: %v", l))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Formula taken from https://github.com/mperham/sidekiq.
|
// DefaultRetryDelayFunc is the default RetryDelayFunc used if one is not specified in Config.
|
||||||
func defaultDelayFunc(n int, e error, t *Task) time.Duration {
|
// It uses exponential back-off strategy to calculate the retry delay.
|
||||||
|
func DefaultRetryDelayFunc(n int, e error, t *Task) time.Duration {
|
||||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||||
|
// Formula taken from https://github.com/mperham/sidekiq.
|
||||||
s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
|
s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
|
||||||
return time.Duration(s) * time.Second
|
return time.Duration(s) * time.Second
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func defaultIsFailureFunc(err error) bool { return err != nil }
|
||||||
|
|
||||||
var defaultQueueConfig = map[string]int{
|
var defaultQueueConfig = map[string]int{
|
||||||
base.DefaultQueueName: 1,
|
base.DefaultQueueName: 1,
|
||||||
}
|
}
|
||||||
|
|
||||||
const defaultShutdownTimeout = 8 * time.Second
|
const (
|
||||||
|
defaultShutdownTimeout = 8 * time.Second
|
||||||
|
|
||||||
|
defaultHealthCheckInterval = 15 * time.Second
|
||||||
|
|
||||||
|
defaultDelayedTaskCheckInterval = 5 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
// NewServer returns a new Server given a redis connection option
|
// NewServer returns a new Server given a redis connection option
|
||||||
// and background processing configuration.
|
// and server configuration.
|
||||||
func NewServer(r RedisConnOpt, cfg Config) *Server {
|
func NewServer(r RedisConnOpt, cfg Config) *Server {
|
||||||
|
c, ok := r.MakeRedisClient().(redis.UniversalClient)
|
||||||
|
if !ok {
|
||||||
|
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
|
||||||
|
}
|
||||||
|
baseCtxFn := cfg.BaseContext
|
||||||
|
if baseCtxFn == nil {
|
||||||
|
baseCtxFn = context.Background
|
||||||
|
}
|
||||||
n := cfg.Concurrency
|
n := cfg.Concurrency
|
||||||
if n < 1 {
|
if n < 1 {
|
||||||
n = runtime.NumCPU()
|
n = runtime.NumCPU()
|
||||||
}
|
}
|
||||||
delayFunc := cfg.RetryDelayFunc
|
delayFunc := cfg.RetryDelayFunc
|
||||||
if delayFunc == nil {
|
if delayFunc == nil {
|
||||||
delayFunc = defaultDelayFunc
|
delayFunc = DefaultRetryDelayFunc
|
||||||
|
}
|
||||||
|
isFailureFunc := cfg.IsFailure
|
||||||
|
if isFailureFunc == nil {
|
||||||
|
isFailureFunc = defaultIsFailureFunc
|
||||||
}
|
}
|
||||||
queues := make(map[string]int)
|
queues := make(map[string]int)
|
||||||
for qname, p := range cfg.Queues {
|
for qname, p := range cfg.Queues {
|
||||||
|
if err := base.ValidateQueueName(qname); err != nil {
|
||||||
|
continue // ignore invalid queue names
|
||||||
|
}
|
||||||
if p > 0 {
|
if p > 0 {
|
||||||
queues[qname] = p
|
queues[qname] = p
|
||||||
}
|
}
|
||||||
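As a quick sanity check on DefaultRetryDelayFunc above — delay = n^4 + 15 + rand(0..29)*(n+1) seconds — the first retries fall in narrow, fast-growing bands. A small sketch that prints those bounds:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Bounds implied by: s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
	for n := 1; n <= 3; n++ {
		lo := time.Duration(int(math.Pow(float64(n), 4))+15) * time.Second
		hi := lo + time.Duration(29*(n+1))*time.Second
		fmt.Printf("retry %d: between %v and %v\n", n, lo, hi) // e.g. retry 1: 16s..1m14s
	}
}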
@@ -272,10 +375,18 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 	if len(queues) == 0 {
 		queues = defaultQueueConfig
 	}
+	var qnames []string
+	for q := range queues {
+		qnames = append(qnames, q)
+	}
 	shutdownTimeout := cfg.ShutdownTimeout
 	if shutdownTimeout == 0 {
 		shutdownTimeout = defaultShutdownTimeout
 	}
+	healthcheckInterval := cfg.HealthCheckInterval
+	if healthcheckInterval == 0 {
+		healthcheckInterval = defaultHealthCheckInterval
+	}
 	logger := log.NewLogger(cfg.Logger)
 	loglevel := cfg.LogLevel
 	if loglevel == level_unspecified {
@@ -283,15 +394,11 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 	}
 	logger.SetLevel(toInternalLogLevel(loglevel))

-	host, err := os.Hostname()
-	if err != nil {
-		host = "unknown-host"
-	}
-	pid := os.Getpid()
-
-	rdb := rdb.NewRDB(createRedisClient(r))
-	ss := base.NewServerState(host, pid, n, queues, cfg.StrictPriority)
+	rdb := rdb.NewRDB(c)
+	starting := make(chan *workerInfo)
+	finished := make(chan *base.TaskMessage)
 	syncCh := make(chan *syncRequest)
+	srvState := &serverState{value: srvStateNew}
 	cancels := base.NewCancelations()

 	syncer := newSyncer(syncerParams{
@@ -302,14 +409,23 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 	heartbeater := newHeartbeater(heartbeaterParams{
 		logger:         logger,
 		broker:         rdb,
-		serverState:    ss,
 		interval:       5 * time.Second,
+		concurrency:    n,
+		queues:         queues,
+		strictPriority: cfg.StrictPriority,
+		state:          srvState,
+		starting:       starting,
+		finished:       finished,
 	})
-	scheduler := newScheduler(schedulerParams{
+	delayedTaskCheckInterval := cfg.DelayedTaskCheckInterval
+	if delayedTaskCheckInterval == 0 {
+		delayedTaskCheckInterval = defaultDelayedTaskCheckInterval
+	}
+	forwarder := newForwarder(forwarderParams{
 		logger:   logger,
 		broker:   rdb,
-		interval: 5 * time.Second,
-		queues:   queues,
+		queues:   qnames,
+		interval: delayedTaskCheckInterval,
 	})
 	subscriber := newSubscriber(subscriberParams{
 		logger: logger,
@@ -319,22 +435,51 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 	processor := newProcessor(processorParams{
 		logger:          logger,
 		broker:          rdb,
-		ss:              ss,
 		retryDelayFunc:  delayFunc,
+		baseCtxFn:       baseCtxFn,
+		isFailureFunc:   isFailureFunc,
 		syncCh:          syncCh,
 		cancelations:    cancels,
+		concurrency:     n,
+		queues:          queues,
+		strictPriority:  cfg.StrictPriority,
 		errHandler:      cfg.ErrorHandler,
 		shutdownTimeout: shutdownTimeout,
+		starting:        starting,
+		finished:        finished,
 	})
-	return &Server{
-		ss:          ss,
+	recoverer := newRecoverer(recovererParams{
+		logger:         logger,
+		broker:         rdb,
+		retryDelayFunc: delayFunc,
+		isFailureFunc:  isFailureFunc,
+		queues:         qnames,
+		interval:       1 * time.Minute,
+	})
+	healthchecker := newHealthChecker(healthcheckerParams{
+		logger:          logger,
+		broker:          rdb,
+		interval:        healthcheckInterval,
+		healthcheckFunc: cfg.HealthCheckFunc,
+	})
+	janitor := newJanitor(janitorParams{
+		logger:   logger,
+		broker:   rdb,
+		queues:   qnames,
+		interval: 8 * time.Second,
+	})
+	return &Server{
 		logger:        logger,
 		broker:        rdb,
-		scheduler:   scheduler,
+		state:         srvState,
+		forwarder:     forwarder,
 		processor:     processor,
 		syncer:        syncer,
 		heartbeater:   heartbeater,
 		subscriber:    subscriber,
+		recoverer:     recoverer,
+		healthchecker: healthchecker,
+		janitor:       janitor,
 	}
 }
@@ -343,8 +488,13 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 // ProcessTask should return nil if the processing of a task
 // is successful.
 //
-// If ProcessTask return a non-nil error or panics, the task
-// will be retried after delay.
+// If ProcessTask returns a non-nil error or panics, the task
+// will be retried after delay if retry-count is remaining,
+// otherwise the task will be archived.
+//
+// One exception to this rule is when ProcessTask returns a SkipRetry error.
+// If the returned error is SkipRetry or an error wraps SkipRetry, retry is
+// skipped and the task will be immediately archived instead.
 type Handler interface {
 	ProcessTask(context.Context, *Task) error
 }
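The Handler documentation above introduces SkipRetry; a handler opts out of retries by wrapping that sentinel error. A sketch with a hypothetical "import" task whose payload validation stands in for any permanent failure:

package main

import (
	"context"
	"fmt"

	"github.com/hibiken/asynq"
)

func handleImport(ctx context.Context, t *asynq.Task) error {
	if len(t.Payload()) == 0 {
		// Permanent input error: archive immediately instead of retrying.
		return fmt.Errorf("empty payload: %w", asynq.SkipRetry)
	}
	// ... process the task ...
	return nil
}

func main() {
	mux := asynq.NewServeMux()
	mux.HandleFunc("import", handleImport)
}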
@@ -360,89 +510,121 @@ func (fn HandlerFunc) ProcessTask(ctx context.Context, task *Task) error {
 	return fn(ctx, task)
 }

-// ErrServerStopped indicates that the operation is now illegal because of the server being stopped.
-var ErrServerStopped = errors.New("asynq: the server has been stopped")
+// ErrServerClosed indicates that the operation is now illegal because the server has been shutdown.
+var ErrServerClosed = errors.New("asynq: Server closed")

-// Run starts the background-task processing and blocks until
+// Run starts the task processing and blocks until
 // an os signal to exit the program is received. Once it receives
 // a signal, it gracefully shuts down all active workers and other
 // goroutines to process the tasks.
 //
-// Run returns any error encountered during server startup time.
-// If the server has already been stopped, ErrServerStopped is returned.
+// Run returns any error encountered at server startup time.
+// If the server has already been shutdown, ErrServerClosed is returned.
 func (srv *Server) Run(handler Handler) error {
 	if err := srv.Start(handler); err != nil {
 		return err
 	}
 	srv.waitForSignals()
-	srv.Stop()
+	srv.Shutdown()
 	return nil
 }

 // Start starts the worker server. Once the server has started,
-// it pulls tasks off queues and starts a worker goroutine for each task.
+// it pulls tasks off queues and starts a worker goroutine for each task
+// and then calls Handler to process it.
 // Tasks are processed concurrently by the workers up to the number of
-// concurrency specified at the initialization time.
+// concurrency specified in Config.Concurrency.
 //
-// Start returns any error encountered during server startup time.
-// If the server has already been stopped, ErrServerStopped is returned.
+// Start returns any error encountered at server startup time.
+// If the server has already been shutdown, ErrServerClosed is returned.
 func (srv *Server) Start(handler Handler) error {
 	if handler == nil {
 		return fmt.Errorf("asynq: server cannot run with nil handler")
 	}
-	switch srv.ss.Status() {
-	case base.StatusRunning:
-		return fmt.Errorf("asynq: the server is already running")
-	case base.StatusStopped:
-		return ErrServerStopped
-	}
-	srv.ss.SetStatus(base.StatusRunning)
 	srv.processor.handler = handler

+	if err := srv.start(); err != nil {
+		return err
+	}
 	srv.logger.Info("Starting processing")

 	srv.heartbeater.start(&srv.wg)
+	srv.healthchecker.start(&srv.wg)
 	srv.subscriber.start(&srv.wg)
 	srv.syncer.start(&srv.wg)
-	srv.scheduler.start(&srv.wg)
+	srv.recoverer.start(&srv.wg)
+	srv.forwarder.start(&srv.wg)
 	srv.processor.start(&srv.wg)
+	srv.janitor.start(&srv.wg)
 	return nil
 }

-// Stop stops the worker server.
+// Checks server state and returns an error if pre-condition is not met.
+// Otherwise it sets the server state to active.
+func (srv *Server) start() error {
+	srv.state.mu.Lock()
+	defer srv.state.mu.Unlock()
+	switch srv.state.value {
+	case srvStateActive:
+		return fmt.Errorf("asynq: the server is already running")
+	case srvStateStopped:
+		return fmt.Errorf("asynq: the server is in the stopped state. Waiting for shutdown.")
+	case srvStateClosed:
+		return ErrServerClosed
+	}
+	srv.state.value = srvStateActive
+	return nil
+}
+
+// Shutdown gracefully shuts down the server.
 // It gracefully closes all active workers. The server will wait for
 // active workers to finish processing tasks for duration specified in Config.ShutdownTimeout.
 // If worker didn't finish processing a task during the timeout, the task will be pushed back to Redis.
-func (srv *Server) Stop() {
-	switch srv.ss.Status() {
-	case base.StatusIdle, base.StatusStopped:
+func (srv *Server) Shutdown() {
+	srv.state.mu.Lock()
+	if srv.state.value == srvStateNew || srv.state.value == srvStateClosed {
+		srv.state.mu.Unlock()
 		// server is not running, do nothing and return.
 		return
 	}
+	srv.state.value = srvStateClosed
+	srv.state.mu.Unlock()

 	srv.logger.Info("Starting graceful shutdown")
-	// Note: The order of termination is important.
+	// Note: The order of shutdown is important.
 	// Sender goroutines should be terminated before the receiver goroutines.
 	// processor -> syncer (via syncCh)
-	srv.scheduler.terminate()
-	srv.processor.terminate()
-	srv.syncer.terminate()
-	srv.subscriber.terminate()
-	srv.heartbeater.terminate()
+	// processor -> heartbeater (via starting, finished channels)
+	srv.forwarder.shutdown()
+	srv.processor.shutdown()
+	srv.recoverer.shutdown()
+	srv.syncer.shutdown()
+	srv.subscriber.shutdown()
+	srv.janitor.shutdown()
+	srv.healthchecker.shutdown()
 	srv.wg.Wait()

 	srv.broker.Close()
-	srv.ss.SetStatus(base.StatusStopped)

 	srv.logger.Info("Exiting")
 }

-// Quiet signals the server to stop pulling new tasks off queues.
-// Quiet should be used before stopping the server.
-func (srv *Server) Quiet() {
+// Stop signals the server to stop pulling new tasks off queues.
+// Stop can be used before shutting down the server to ensure that all
+// currently active tasks are processed before server shutdown.
+//
+// Stop does not shutdown the server, make sure to call Shutdown before exit.
+func (srv *Server) Stop() {
+	srv.state.mu.Lock()
+	if srv.state.value != srvStateActive {
+		// Invalid call to Stop, server can only go from Active state to Stopped state.
+		srv.state.mu.Unlock()
+		return
+	}
+	srv.state.value = srvStateStopped
+	srv.state.mu.Unlock()
+
 	srv.logger.Info("Stopping processor")
 	srv.processor.stop()
-	srv.ss.SetStatus(base.StatusQuiet)
 	srv.logger.Info("Processor stopped")
 }
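With the renames above (Quiet→Stop, Stop→Shutdown), a graceful drain is now a two-step sequence. A sketch, assuming some out-of-band trigger decides when to drain; the sleep is a stand-in for waiting on in-flight work:

package main

import (
	"context"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "localhost:6379"}, asynq.Config{Concurrency: 5})
	mux := asynq.NewServeMux()
	mux.HandleFunc("noop", func(ctx context.Context, t *asynq.Task) error { return nil })

	if err := srv.Start(mux); err != nil {
		log.Fatal(err)
	}

	// Stop: no new tasks are pulled; active workers keep running.
	srv.Stop()
	time.Sleep(time.Second)

	// Shutdown: waits up to Config.ShutdownTimeout, then requeues leftovers.
	srv.Shutdown()
}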
@@ -11,6 +11,7 @@ import (
 	"testing"
 	"time"

+	"github.com/hibiken/asynq/internal/asynqtest"
 	"github.com/hibiken/asynq/internal/rdb"
 	"github.com/hibiken/asynq/internal/testbroker"
 	"go.uber.org/goleak"
@@ -18,15 +19,13 @@ import (

 func TestServer(t *testing.T) {
 	// https://github.com/go-redis/redis/issues/1029
-	ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
+	ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
 	defer goleak.VerifyNoLeaks(t, ignoreOpt)

-	r := &RedisClientOpt{
-		Addr: "localhost:6379",
-		DB:   15,
-	}
-	c := NewClient(r)
-	srv := NewServer(r, Config{
+	redisConnOpt := getRedisConnOpt(t)
+	c := NewClient(redisConnOpt)
+	defer c.Close()
+	srv := NewServer(redisConnOpt, Config{
 		Concurrency: 10,
 		LogLevel:    testLogLevel,
 	})
@@ -41,22 +40,22 @@ func TestServer(t *testing.T) {
 		t.Fatal(err)
 	}

-	err = c.Enqueue(NewTask("send_email", map[string]interface{}{"recipient_id": 123}))
+	_, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 123})))
 	if err != nil {
 		t.Errorf("could not enqueue a task: %v", err)
 	}

-	err = c.EnqueueAt(time.Now().Add(time.Hour), NewTask("send_email", map[string]interface{}{"recipient_id": 456}))
+	_, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 456})), ProcessIn(1*time.Hour))
 	if err != nil {
 		t.Errorf("could not enqueue a task: %v", err)
 	}

-	srv.Stop()
+	srv.Shutdown()
 }

 func TestServerRun(t *testing.T) {
 	// https://github.com/go-redis/redis/issues/1029
-	ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
+	ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
 	defer goleak.VerifyNoLeaks(t, ignoreOpt)

 	srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
@@ -72,7 +71,7 @@ func TestServerRun(t *testing.T) {
 	go func() {
 		select {
 		case <-time.After(10 * time.Second):
-			t.Fatal("server did not stop after receiving TERM signal")
+			panic("server did not stop after receiving TERM signal")
 		case <-done:
 		}
 	}()
@@ -83,16 +82,16 @@ func TestServerRun(t *testing.T) {
 	}
 }

-func TestServerErrServerStopped(t *testing.T) {
+func TestServerErrServerClosed(t *testing.T) {
 	srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
 	handler := NewServeMux()
 	if err := srv.Start(handler); err != nil {
 		t.Fatal(err)
 	}
-	srv.Stop()
+	srv.Shutdown()
 	err := srv.Start(handler)
-	if err != ErrServerStopped {
-		t.Errorf("Restarting server: (*Server).Start(handler) = %v, want ErrServerStopped error", err)
+	if err != ErrServerClosed {
+		t.Errorf("Restarting server: (*Server).Start(handler) = %v, want ErrServerClosed error", err)
 	}
 }

@@ -101,7 +100,7 @@ func TestServerErrNilHandler(t *testing.T) {
 	err := srv.Start(nil)
 	if err == nil {
 		t.Error("Starting server with nil handler: (*Server).Start(nil) did not return error")
-		srv.Stop()
+		srv.Shutdown()
 	}
 }

@@ -115,7 +114,7 @@ func TestServerErrServerRunning(t *testing.T) {
 	if err == nil {
 		t.Error("Calling (*Server).Start(handler) on already running server did not return error")
 	}
-	srv.Stop()
+	srv.Shutdown()
 }

 func TestServerWithRedisDown(t *testing.T) {
@@ -129,7 +128,7 @@ func TestServerWithRedisDown(t *testing.T) {
 	testBroker := testbroker.NewTestBroker(r)
 	srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
 	srv.broker = testBroker
-	srv.scheduler.broker = testBroker
+	srv.forwarder.broker = testBroker
 	srv.heartbeater.broker = testBroker
 	srv.processor.broker = testBroker
 	srv.subscriber.broker = testBroker
@@ -147,7 +146,7 @@ func TestServerWithRedisDown(t *testing.T) {

 	time.Sleep(3 * time.Second)

-	srv.Stop()
+	srv.Shutdown()
 }

 func TestServerWithFlakyBroker(t *testing.T) {
@@ -159,19 +158,20 @@ func TestServerWithFlakyBroker(t *testing.T) {
 	}()
 	r := rdb.NewRDB(setup(t))
 	testBroker := testbroker.NewTestBroker(r)
-	srv := NewServer(RedisClientOpt{Addr: redisAddr, DB: redisDB}, Config{LogLevel: testLogLevel})
+	redisConnOpt := getRedisConnOpt(t)
+	srv := NewServer(redisConnOpt, Config{LogLevel: testLogLevel})
 	srv.broker = testBroker
-	srv.scheduler.broker = testBroker
+	srv.forwarder.broker = testBroker
 	srv.heartbeater.broker = testBroker
 	srv.processor.broker = testBroker
 	srv.subscriber.broker = testBroker

-	c := NewClient(RedisClientOpt{Addr: redisAddr, DB: redisDB})
+	c := NewClient(redisConnOpt)

 	h := func(ctx context.Context, task *Task) error {
 		// force task retry.
-		if task.Type == "bad_task" {
-			return fmt.Errorf("could not process %q", task.Type)
+		if task.Type() == "bad_task" {
+			return fmt.Errorf("could not process %q", task.Type())
 		}
 		time.Sleep(2 * time.Second)
 		return nil
@@ -183,15 +183,15 @@ func TestServerWithFlakyBroker(t *testing.T) {
 	}

 	for i := 0; i < 10; i++ {
-		err := c.Enqueue(NewTask("enqueued", nil), MaxRetry(i))
+		_, err := c.Enqueue(NewTask("enqueued", nil), MaxRetry(i))
 		if err != nil {
 			t.Fatal(err)
 		}
-		err = c.Enqueue(NewTask("bad_task", nil))
+		_, err = c.Enqueue(NewTask("bad_task", nil))
 		if err != nil {
 			t.Fatal(err)
 		}
-		err = c.EnqueueIn(time.Duration(i)*time.Second, NewTask("scheduled", nil))
+		_, err = c.Enqueue(NewTask("scheduled", nil), ProcessIn(time.Duration(i)*time.Second))
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -207,7 +207,7 @@ func TestServerWithFlakyBroker(t *testing.T) {

 	time.Sleep(3 * time.Second)

-	srv.Stop()
+	srv.Shutdown()
 }

 func TestLogLevel(t *testing.T) {
@@ -22,9 +22,16 @@ func (srv *Server) waitForSignals() {
 	for {
 		sig := <-sigs
 		if sig == unix.SIGTSTP {
-			srv.Quiet()
+			srv.Stop()
 			continue
 		}
 		break
 	}
 }
+
+func (s *Scheduler) waitForSignals() {
+	s.logger.Info("Send signal TERM or INT to stop the scheduler")
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, unix.SIGTERM, unix.SIGINT)
+	<-sigs
+}
@@ -20,3 +20,10 @@ func (srv *Server) waitForSignals() {
 	signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
 	<-sigs
 }
+
+func (s *Scheduler) waitForSignals() {
+	s.logger.Info("Send signal TERM or INT to stop the scheduler")
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
+	<-sigs
+}
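Per the unix signal handler above, SIGTSTP now maps to Stop (quiet the worker) while TERM/INT exit the loop and trigger Shutdown. A sketch of exercising this from another process; workerPID is an assumed placeholder:

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	const workerPID = 12345 // assumed PID of a running asynq worker
	p, err := os.FindProcess(workerPID)
	if err != nil {
		panic(err)
	}
	p.Signal(unix.SIGTSTP) // worker calls srv.Stop(): stops pulling new tasks
	p.Signal(unix.SIGTERM) // worker exits its signal loop and calls srv.Shutdown()
}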
@@ -8,7 +8,7 @@ import (
 	"sync"
 	"time"

-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/log"
 )
@@ -20,7 +20,7 @@ type subscriber struct {
 	// channel to communicate back to the long running "subscriber" goroutine.
 	done chan struct{}

-	// cancelations hold cancel functions for all in-progress tasks.
+	// cancelations hold cancel functions for all active tasks.
 	cancelations *base.Cancelations

 	// time to wait before retrying to connect to redis.
@@ -43,7 +43,7 @@ func newSubscriber(params subscriberParams) *subscriber {
 	}
 }

-func (s *subscriber) terminate() {
+func (s *subscriber) shutdown() {
 	s.logger.Debug("Subscriber shutting down...")
 	// Signal the subscriber goroutine to stop.
 	s.done <- struct{}{}
@@ -16,6 +16,7 @@ import (
|
|||||||
|
|
||||||
func TestSubscriber(t *testing.T) {
|
func TestSubscriber(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
rdbClient := rdb.NewRDB(r)
|
rdbClient := rdb.NewRDB(r)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
@@ -45,7 +46,7 @@ func TestSubscriber(t *testing.T) {
|
|||||||
})
|
})
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
subscriber.start(&wg)
|
subscriber.start(&wg)
|
||||||
defer subscriber.terminate()
|
defer subscriber.shutdown()
|
||||||
|
|
||||||
// wait for subscriber to establish connection to pubsub channel
|
// wait for subscriber to establish connection to pubsub channel
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
@@ -76,6 +77,7 @@ func TestSubscriberWithRedisDown(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
r := rdb.NewRDB(setup(t))
|
r := rdb.NewRDB(setup(t))
|
||||||
|
defer r.Close()
|
||||||
testBroker := testbroker.NewTestBroker(r)
|
testBroker := testbroker.NewTestBroker(r)
|
||||||
|
|
||||||
cancelations := base.NewCancelations()
|
cancelations := base.NewCancelations()
|
||||||
@@ -89,7 +91,7 @@ func TestSubscriberWithRedisDown(t *testing.T) {
|
|||||||
testBroker.Sleep() // simulate a situation where subscriber cannot connect to redis.
|
testBroker.Sleep() // simulate a situation where subscriber cannot connect to redis.
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
subscriber.start(&wg)
|
subscriber.start(&wg)
|
||||||
defer subscriber.terminate()
|
defer subscriber.shutdown()
|
||||||
|
|
||||||
time.Sleep(2 * time.Second) // subscriber should wait and retry connecting to redis.
|
time.Sleep(2 * time.Second) // subscriber should wait and retry connecting to redis.
|
||||||
|
|
||||||
|
@@ -28,6 +28,7 @@ type syncer struct {
 type syncRequest struct {
 	fn       func() error // sync operation
 	errMsg   string       // error message
+	deadline time.Time    // request should be dropped if deadline has been exceeded
 }

 type syncerParams struct {
@@ -45,7 +46,7 @@ func newSyncer(params syncerParams) *syncer {
 	}
 }

-func (s *syncer) terminate() {
+func (s *syncer) shutdown() {
 	s.logger.Debug("Syncer shutting down...")
 	// Signal the syncer goroutine to stop.
 	s.done <- struct{}{}
@@ -72,6 +73,9 @@ func (s *syncer) start(wg *sync.WaitGroup) {
 			case <-time.After(s.interval):
 				var temp []*syncRequest
 				for _, req := range requests {
+					if req.deadline.Before(time.Now()) {
+						continue // drop stale request
+					}
 					if err := req.fn(); err != nil {
 						temp = append(temp, req)
 					}
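The deadline field added above gives each queued sync operation a time-to-live: on every tick the syncer retries failed operations but silently drops any whose deadline has passed. A standalone sketch of that retry-with-deadline pass (the syncRequest shape matches the diff; the helper and sample requests are illustrative):

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    type syncRequest struct {
    	fn       func() error // sync operation
    	errMsg   string       // error message
    	deadline time.Time    // request should be dropped if deadline has been exceeded
    }

    // retryPending runs each pending request once and returns the ones that
    // failed but are still within their deadline, mirroring the loop in the diff.
    func retryPending(requests []*syncRequest) (remaining []*syncRequest) {
    	for _, req := range requests {
    		if req.deadline.Before(time.Now()) {
    			continue // drop stale request
    		}
    		if err := req.fn(); err != nil {
    			remaining = append(remaining, req)
    		}
    	}
    	return remaining
    }

    func main() {
    	reqs := []*syncRequest{
    		{fn: func() error { return errors.New("redis down") },
    			errMsg: "could not mark task as done", deadline: time.Now().Add(time.Minute)},
    		{fn: func() error { return nil }, deadline: time.Now().Add(time.Minute)},
    		{fn: func() error { return nil }, deadline: time.Now().Add(-time.Second)}, // stale: dropped
    	}
    	fmt.Printf("requests left after one pass: %d\n", len(retryPending(reqs))) // prints 1
    }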
@@ -5,6 +5,7 @@
 package asynq

 import (
+	"context"
 	"fmt"
 	"sync"
 	"testing"
@@ -22,8 +23,9 @@ func TestSyncer(t *testing.T) {
 		h.NewTaskMessage("gen_thumbnail", nil),
 	}
 	r := setup(t)
+	defer r.Close()
 	rdbClient := rdb.NewRDB(r)
-	h.SeedInProgressQueue(t, r, inProgress)
+	h.SeedActiveQueue(t, r, inProgress, base.DefaultQueueName)

 	const interval = time.Second
 	syncRequestCh := make(chan *syncRequest)
@@ -34,22 +36,23 @@ func TestSyncer(t *testing.T) {
 	})
 	var wg sync.WaitGroup
 	syncer.start(&wg)
-	defer syncer.terminate()
+	defer syncer.shutdown()

 	for _, msg := range inProgress {
 		m := msg
 		syncRequestCh <- &syncRequest{
 			fn: func() error {
-				return rdbClient.Done(m)
+				return rdbClient.Done(context.Background(), m)
 			},
+			deadline: time.Now().Add(5 * time.Minute),
 		}
 	}

 	time.Sleep(2 * interval) // ensure that syncer runs at least once

-	gotInProgress := h.GetInProgressMessages(t, r)
-	if l := len(gotInProgress); l != 0 {
-		t.Errorf("%q has length %d; want 0", base.InProgressQueue, l)
+	gotActive := h.GetActiveMessages(t, r, base.DefaultQueueName)
+	if l := len(gotActive); l != 0 {
+		t.Errorf("%q has length %d; want 0", base.ActiveKey(base.DefaultQueueName), l)
 	}
 }

@@ -64,7 +67,7 @@ func TestSyncerRetry(t *testing.T) {

 	var wg sync.WaitGroup
 	syncer.start(&wg)
-	defer syncer.terminate()
+	defer syncer.shutdown()

 	var (
 		mu sync.Mutex
@@ -87,6 +90,7 @@ func TestSyncerRetry(t *testing.T) {
 	syncRequestCh <- &syncRequest{
 		fn:     requestFunc,
 		errMsg: "error",
+		deadline: time.Now().Add(5 * time.Minute),
 	}

 	// allow syncer to retry
@@ -98,3 +102,41 @@ func TestSyncerRetry(t *testing.T) {
 	}
 	mu.Unlock()
 }
+
+func TestSyncerDropsStaleRequests(t *testing.T) {
+	const interval = time.Second
+	syncRequestCh := make(chan *syncRequest)
+	syncer := newSyncer(syncerParams{
+		logger:     testLogger,
+		requestsCh: syncRequestCh,
+		interval:   interval,
+	})
+	var wg sync.WaitGroup
+	syncer.start(&wg)
+
+	var (
+		mu sync.Mutex
+		n  int // number of times request has been processed
+	)
+
+	for i := 0; i < 10; i++ {
+		syncRequestCh <- &syncRequest{
+			fn: func() error {
+				mu.Lock()
+				n++
+				mu.Unlock()
+				return nil
+			},
+			deadline: time.Now().Add(time.Duration(-i) * time.Second), // already exceeded deadline
+		}
+	}
+
+	time.Sleep(2 * interval) // ensure that syncer runs at least once
+	syncer.shutdown()
+
+	mu.Lock()
+	if n != 0 {
+		t.Errorf("requests has been processed %d times, want 0", n)
+	}
+	mu.Unlock()
+}
@@ -1,19 +1,11 @@
 # Asynq CLI

-Asynq CLI is a command line tool to monitor the tasks managed by `asynq` package.
+Asynq CLI is a command line tool to monitor the queues and tasks managed by `asynq` package.

 ## Table of Contents

 - [Installation](#installation)
-- [Quick Start](#quick-start)
-  - [Stats](#stats)
-  - [History](#history)
-  - [Servers](#servers)
-  - [List](#list)
-  - [Enqueue](#enqueue)
-  - [Delete](#delete)
-  - [Kill](#kill)
-  - [Cancel](#cancel)
+- [Usage](#usage)
 - [Config File](#config-file)

 ## Installation
@@ -24,133 +16,41 @@ In order to use the tool, compile it using the following command:

 This will create the asynq executable under your `$GOPATH/bin` directory.

-## Quickstart
+## Usage

-The tool has a few commands to inspect the state of tasks and queues.
+### Commands

-Run `asynq help` to see all the available commands.
+To view details on any command, use `asynq help <command> <subcommand>`.
+
+- `asynq stats`
+- `asynq queue [ls inspect history rm pause unpause]`
+- `asynq task [ls cancel delete archive run delete-all archive-all run-all]`
+- `asynq server [ls]`
+
+### Global flags

 Asynq CLI needs to connect to a redis-server to inspect the state of queues and tasks. Use flags to specify the options to connect to the redis-server used by your application.
+To connect to a redis cluster, pass `--cluster` and `--cluster_addrs` flags.

 By default, CLI will try to connect to a redis server running at `localhost:6379`.

-### Stats
-
-Stats command gives the overview of the current state of tasks and queues. You can run it in conjunction with `watch` command to repeatedly run `stats`.
-
-Example:
-
-    watch -n 3 asynq stats
-
-This will run `asynq stats` command every 3 seconds.
-
-![Gif]
-
-### History
-
-History command shows the number of processed and failed tasks from the last x days.
-
-By default, it shows the stats from the last 10 days. Use `--days` to specify the number of days.
-
-Example:
-
-    asynq history --days=30
-
-![Gif]
-
-### Servers
-
-Servers command shows the list of running worker servers pulling tasks from the given redis instance.
-
-Example:
-
-    asynq servers
-
-### List
-
-List command shows all tasks in the specified state in a table format
-
-Example:
-
-    asynq ls retry
-    asynq ls scheduled
-    asynq ls dead
-    asynq ls enqueued:default
-    asynq ls inprogress
-
-### Enqueue
-
-There are two commands to enqueue tasks.
-
-Command `enq` takes a task ID and moves the task to **Enqueued** state. You can obtain the task ID by running `ls` command.
-
-Example:
-
-    asynq enq d:1575732274:bnogo8gt6toe23vhef0g
-
-Command `enqall` moves all tasks to **Enqueued** state from the specified state.
-
-Example:
-
-    asynq enqall retry
-
-Running the above command will move all **Retry** tasks to **Enqueued** state.
-
-### Delete
-
-There are two commands for task deletion.
-
-Command `del` takes a task ID and deletes the task. You can obtain the task ID by running `ls` command.
-
-Example:
-
-    asynq del r:1575732274:bnogo8gt6toe23vhef0g
-
-Command `delall` deletes all tasks which are in the specified state.
-
-Example:
-
-    asynq delall retry
-
-Running the above command will delete all **Retry** tasks.
-
-### Kill
-
-There are two commands to kill (i.e. move to dead state) tasks.
-
-Command `kill` takes a task ID and kills the task. You can obtain the task ID by running `ls` command.
-
-Example:
-
-    asynq kill r:1575732274:bnogo8gt6toe23vhef0g
-
-Command `killall` kills all tasks which are in the specified state.
-
-Example:
-
-    asynq killall retry
-
-Running the above command will move all **Retry** tasks to **Dead** state.
-
-### Cancel
-
-Command `cancel` takes a task ID and sends a cancelation signal to the goroutine processing the specified task.
-You can obtain the task ID by running `ls` command.
-
-The task should be in "in-progress" state.
-Handler implementation needs to be context aware in order to actually stop processing.
-
-Example:
-
-    asynq cancel bnogo8gt6toe23vhef0g
+```
+    --config string          config file to set flag defaut values (default is $HOME/.asynq.yaml)
+-n, --db int                 redis database number (default is 0)
+-h, --help                   help for asynq
+-p, --password string        password to use when connecting to redis server
+-u, --uri string             redis server URI (default "127.0.0.1:6379")
+    --cluster                connect to redis cluster
+    --cluster_addrs string   list of comma-separated redis server addresses
+```

 ## Config File

 You can use a config file to set default values for the flags.
-This is useful, for example when you have to connect to a remote redis server.

 By default, `asynq` will try to read config file located in
-`$HOME/.asynq.(yaml|json)`. You can specify the file location via `--config` flag.
+`$HOME/.asynq.(yml|json)`. You can specify the file location via `--config` flag.

 Config file example:
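The example that follows "Config file example:" is cut off in this extract. As an illustration only, a config file setting defaults for the flags the README documents might look like the following (all values are placeholders, not from the source):

    # $HOME/.asynq.yaml (hypothetical example)
    uri: 127.0.0.1:6379
    db: 2
    password: mypassword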
@@ -1,53 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-// cancelCmd represents the cancel command
-var cancelCmd = &cobra.Command{
-	Use:   "cancel [task id]",
-	Short: "Sends a cancelation signal to the goroutine processing the specified task",
-	Long: `Cancel (asynq cancel) will send a cancelation signal to the goroutine processing
-the specified task.
-
-The command takes one argument which specifies the task to cancel.
-The task should be in in-progress state.
-Identifier for a task should be obtained by running "asynq ls" command.
-
-Handler implementation needs to be context aware for cancelation signal to
-actually cancel the processing.
-
-Example: asynq cancel bnogo8gt6toe23vhef0g`,
-	Args: cobra.ExactArgs(1),
-	Run:  cancel,
-}
-
-func init() {
-	rootCmd.AddCommand(cancelCmd)
-}
-
-func cancel(cmd *cobra.Command, args []string) {
-	r := rdb.NewRDB(redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	}))
-
-	err := r.PublishCancelation(args[0])
-	if err != nil {
-		fmt.Printf("could not send cancelation signal: %v\n", err)
-		os.Exit(1)
-	}
-	fmt.Printf("Successfully sent cancelation siganl for task %s\n", args[0])
-}
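The deleted cancel command's help text notes that handlers must be context aware for cancelation to take effect. A sketch of what such a handler looks like against asynq's handler signature (the task type, loop, and durations are illustrative):

    package tasks

    import (
    	"context"
    	"time"

    	"github.com/hibiken/asynq"
    )

    // HandleLongTask checks ctx.Done() between steps so that a cancelation
    // signal published by the CLI actually stops work in progress.
    func HandleLongTask(ctx context.Context, t *asynq.Task) error {
    	for i := 0; i < 10; i++ {
    		select {
    		case <-ctx.Done():
    			return ctx.Err() // cancelation received; abort processing
    		case <-time.After(time.Second):
    			// ... do one step of the work here ...
    		}
    	}
    	return nil
    }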
tools/asynq/cmd/cron.go (new file, 129 lines)
@@ -0,0 +1,129 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package cmd
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"time"
+
+	"github.com/hibiken/asynq"
+	"github.com/spf13/cobra"
+)
+
+func init() {
+	rootCmd.AddCommand(cronCmd)
+	cronCmd.AddCommand(cronListCmd)
+	cronCmd.AddCommand(cronHistoryCmd)
+	cronHistoryCmd.Flags().Int("page", 1, "page number")
+	cronHistoryCmd.Flags().Int("size", 30, "page size")
+}
+
+var cronCmd = &cobra.Command{
+	Use:   "cron",
+	Short: "Manage cron",
+}
+
+var cronListCmd = &cobra.Command{
+	Use:   "ls",
+	Short: "List cron entries",
+	Run:   cronList,
+}
+
+var cronHistoryCmd = &cobra.Command{
+	Use:   "history [ENTRY_ID...]",
+	Short: "Show history of each cron tasks",
+	Args:  cobra.MinimumNArgs(1),
+	Run:   cronHistory,
+}
+
+func cronList(cmd *cobra.Command, args []string) {
+	inspector := createInspector()
+
+	entries, err := inspector.SchedulerEntries()
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	if len(entries) == 0 {
+		fmt.Println("No scheduler entries")
+		return
+	}
+
+	// Sort entries by spec.
+	sort.Slice(entries, func(i, j int) bool {
+		x, y := entries[i], entries[j]
+		return x.Spec < y.Spec
+	})
+
+	cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
+	printRows := func(w io.Writer, tmpl string) {
+		for _, e := range entries {
+			fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), sprintBytes(e.Task.Payload()), e.Opts,
+				nextEnqueue(e.Next), prevEnqueue(e.Prev))
+		}
+	}
+	printTable(cols, printRows)
+}
+
+// Returns a string describing when the next enqueue will happen.
+func nextEnqueue(nextEnqueueAt time.Time) string {
+	d := nextEnqueueAt.Sub(time.Now()).Round(time.Second)
+	if d < 0 {
+		return "Now"
+	}
+	return fmt.Sprintf("In %v", d)
+}
+
+// Returns a string describing when the previous enqueue was.
+func prevEnqueue(prevEnqueuedAt time.Time) string {
+	if prevEnqueuedAt.IsZero() {
+		return "N/A"
+	}
+	return fmt.Sprintf("%v ago", time.Since(prevEnqueuedAt).Round(time.Second))
+}
+
+func cronHistory(cmd *cobra.Command, args []string) {
+	pageNum, err := cmd.Flags().GetInt("page")
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	pageSize, err := cmd.Flags().GetInt("size")
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	inspector := createInspector()
+	for i, entryID := range args {
+		if i > 0 {
+			fmt.Printf("\n%s\n", separator)
+		}
+		fmt.Println()
+
+		fmt.Printf("Entry: %s\n\n", entryID)
+
+		events, err := inspector.ListSchedulerEnqueueEvents(
+			entryID, asynq.PageSize(pageSize), asynq.Page(pageNum))
+		if err != nil {
+			fmt.Printf("error: %v\n", err)
+			continue
+		}
+		if len(events) == 0 {
+			fmt.Printf("No scheduler enqueue events found for entry: %s\n", entryID)
+			continue
+		}
+
+		cols := []string{"TaskID", "EnqueuedAt"}
+		printRows := func(w io.Writer, tmpl string) {
+			for _, e := range events {
+				fmt.Fprintf(w, tmpl, e.TaskID, e.EnqueuedAt)
+			}
+		}
+		printTable(cols, printRows)
+	}
+}
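The cron subcommands above list scheduler entries and their enqueue history; the entries themselves come from a running asynq Scheduler. A sketch of how an entry is registered using asynq's public Scheduler API (the cron spec, task type, and address are placeholders, and the exact API may differ across versions):

    package main

    import (
    	"log"

    	"github.com/hibiken/asynq"
    )

    func main() {
    	// Each Register call creates a "scheduler entry" that
    	// `asynq cron ls` can list and `asynq cron history` can inspect.
    	scheduler := asynq.NewScheduler(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"}, nil)

    	entryID, err := scheduler.Register("@every 30s", asynq.NewTask("cleanup", nil))
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("registered scheduler entry: %s", entryID)

    	if err := scheduler.Run(); err != nil { // blocks until the process is signaled to stop
    		log.Fatal(err)
    	}
    }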
@@ -1,73 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-// delCmd represents the del command
-var delCmd = &cobra.Command{
-	Use:   "del [task id]",
-	Short: "Deletes a task given an identifier",
-	Long: `Del (asynq del) will delete a task given an identifier.
-
-The command takes one argument which specifies the task to delete.
-The task should be in either scheduled, retry or dead state.
-Identifier for a task should be obtained by running "asynq ls" command.
-
-Example: asynq enq d:1575732274:bnogo8gt6toe23vhef0g`,
-	Args: cobra.ExactArgs(1),
-	Run:  del,
-}
-
-func init() {
-	rootCmd.AddCommand(delCmd)
-
-	// Here you will define your flags and configuration settings.
-
-	// Cobra supports Persistent Flags which will work for this command
-	// and all subcommands, e.g.:
-	// delCmd.PersistentFlags().String("foo", "", "A help for foo")
-
-	// Cobra supports local flags which will only run when this command
-	// is called directly, e.g.:
-	// delCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
-
-func del(cmd *cobra.Command, args []string) {
-	id, score, qtype, err := parseQueryID(args[0])
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	r := rdb.NewRDB(redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	}))
-	switch qtype {
-	case "s":
-		err = r.DeleteScheduledTask(id, score)
-	case "r":
-		err = r.DeleteRetryTask(id, score)
-	case "d":
-		err = r.DeleteDeadTask(id, score)
-	default:
-		fmt.Println("invalid argument")
-		os.Exit(1)
-	}
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	fmt.Printf("Successfully deleted %v\n", args[0])
-}
@@ -1,71 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var delallValidArgs = []string{"scheduled", "retry", "dead"}
-
-// delallCmd represents the delall command
-var delallCmd = &cobra.Command{
-	Use:   "delall [state]",
-	Short: "Deletes all tasks in the specified state",
-	Long: `Delall (asynq delall) will delete all tasks in the specified state.
-
-The argument should be one of "scheduled", "retry", or "dead".
-
-Example: asynq delall dead -> Deletes all dead tasks`,
-	ValidArgs: delallValidArgs,
-	Args:      cobra.ExactValidArgs(1),
-	Run:       delall,
-}
-
-func init() {
-	rootCmd.AddCommand(delallCmd)
-
-	// Here you will define your flags and configuration settings.
-
-	// Cobra supports Persistent Flags which will work for this command
-	// and all subcommands, e.g.:
-	// delallCmd.PersistentFlags().String("foo", "", "A help for foo")
-
-	// Cobra supports local flags which will only run when this command
-	// is called directly, e.g.:
-	// delallCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
-
-func delall(cmd *cobra.Command, args []string) {
-	c := redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	})
-	r := rdb.NewRDB(c)
-	var err error
-	switch args[0] {
-	case "scheduled":
-		err = r.DeleteAllScheduledTasks()
-	case "retry":
-		err = r.DeleteAllRetryTasks()
-	case "dead":
-		err = r.DeleteAllDeadTasks()
-	default:
-		fmt.Printf("error: `asynq delall [state]` only accepts %v as the argument.\n", delallValidArgs)
-		os.Exit(1)
-	}
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	fmt.Printf("Deleted all tasks in %q state\n", args[0])
-}
@@ -1,76 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-// enqCmd represents the enq command
-var enqCmd = &cobra.Command{
-	Use:   "enq [task id]",
-	Short: "Enqueues a task given an identifier",
-	Long: `Enq (asynq enq) will enqueue a task given an identifier.
-
-The command takes one argument which specifies the task to enqueue.
-The task should be in either scheduled, retry or dead state.
-Identifier for a task should be obtained by running "asynq ls" command.
-
-The task enqueued by this command will be processed as soon as the task
-gets dequeued by a processor.
-
-Example: asynq enq d:1575732274:bnogo8gt6toe23vhef0g`,
-	Args: cobra.ExactArgs(1),
-	Run:  enq,
-}
-
-func init() {
-	rootCmd.AddCommand(enqCmd)
-
-	// Here you will define your flags and configuration settings.
-
-	// Cobra supports Persistent Flags which will work for this command
-	// and all subcommands, e.g.:
-	// enqCmd.PersistentFlags().String("foo", "", "A help for foo")
-
-	// Cobra supports local flags which will only run when this command
-	// is called directly, e.g.:
-	// enqCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
-
-func enq(cmd *cobra.Command, args []string) {
-	id, score, qtype, err := parseQueryID(args[0])
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	r := rdb.NewRDB(redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	}))
-	switch qtype {
-	case "s":
-		err = r.EnqueueScheduledTask(id, score)
-	case "r":
-		err = r.EnqueueRetryTask(id, score)
-	case "d":
-		err = r.EnqueueDeadTask(id, score)
-	default:
-		fmt.Println("invalid argument")
-		os.Exit(1)
-	}
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	fmt.Printf("Successfully enqueued %v\n", args[0])
-}
@@ -1,75 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var enqallValidArgs = []string{"scheduled", "retry", "dead"}
-
-// enqallCmd represents the enqall command
-var enqallCmd = &cobra.Command{
-	Use:   "enqall [state]",
-	Short: "Enqueues all tasks in the specified state",
-	Long: `Enqall (asynq enqall) will enqueue all tasks in the specified state.
-
-The argument should be one of "scheduled", "retry", or "dead".
-
-The tasks enqueued by this command will be processed as soon as it
-gets dequeued by a processor.
-
-Example: asynq enqall dead -> Enqueues all dead tasks`,
-	ValidArgs: enqallValidArgs,
-	Args:      cobra.ExactValidArgs(1),
-	Run:       enqall,
-}
-
-func init() {
-	rootCmd.AddCommand(enqallCmd)
-
-	// Here you will define your flags and configuration settings.
-
-	// Cobra supports Persistent Flags which will work for this command
-	// and all subcommands, e.g.:
-	// enqallCmd.PersistentFlags().String("foo", "", "A help for foo")
-
-	// Cobra supports local flags which will only run when this command
-	// is called directly, e.g.:
-	// enqallCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
-
-func enqall(cmd *cobra.Command, args []string) {
-	c := redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	})
-	r := rdb.NewRDB(c)
-	var n int64
-	var err error
-	switch args[0] {
-	case "scheduled":
-		n, err = r.EnqueueAllScheduledTasks()
-	case "retry":
-		n, err = r.EnqueueAllRetryTasks()
-	case "dead":
-		n, err = r.EnqueueAllDeadTasks()
-	default:
-		fmt.Printf("error: `asynq enqall [state]` only accepts %v as the argument.\n", enqallValidArgs)
-		os.Exit(1)
-	}
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	fmt.Printf("Enqueued %d tasks in %q state\n", n, args[0])
-}
@@ -1,71 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-	"strings"
-	"text/tabwriter"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var days int
-
-// historyCmd represents the history command
-var historyCmd = &cobra.Command{
-	Use:   "history",
-	Short: "Shows historical aggregate data",
-	Long: `History (asynq history) will show the number of processed and failed tasks
-from the last x days.
-
-By default, it will show the data from the last 10 days.
-
-Example: asynq history -x=30 -> Shows stats from the last 30 days`,
-	Args: cobra.NoArgs,
-	Run:  history,
-}
-
-func init() {
-	rootCmd.AddCommand(historyCmd)
-	historyCmd.Flags().IntVarP(&days, "days", "x", 10, "show data from last x days")
-}
-
-func history(cmd *cobra.Command, args []string) {
-	c := redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	})
-	r := rdb.NewRDB(c)
-
-	stats, err := r.HistoricalStats(days)
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	printDailyStats(stats)
-}
-
-func printDailyStats(stats []*rdb.DailyStats) {
-	format := strings.Repeat("%v\t", 4) + "\n"
-	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
-	fmt.Fprintf(tw, format, "Date (UTC)", "Processed", "Failed", "Error Rate")
-	fmt.Fprintf(tw, format, "----------", "---------", "------", "----------")
-	for _, s := range stats {
-		var errrate string
-		if s.Processed == 0 {
-			errrate = "N/A"
-		} else {
-			errrate = fmt.Sprintf("%.2f%%", float64(s.Failed)/float64(s.Processed)*100)
-		}
-		fmt.Fprintf(tw, format, s.Time.Format("2006-01-02"), s.Processed, s.Failed, errrate)
-	}
-	tw.Flush()
-}
@@ -1,72 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-// killCmd represents the kill command
-var killCmd = &cobra.Command{
-	Use:   "kill [task id]",
-	Short: "Kills a task given an identifier",
-	Long: `Kill (asynq kill) will put a task in dead state given an identifier.
-
-The command takes one argument which specifies the task to kill.
-The task should be in either scheduled or retry state.
-Identifier for a task should be obtained by running "asynq ls" command.
-
-Example: asynq kill r:1575732274:bnogo8gt6toe23vhef0g`,
-	Args: cobra.ExactArgs(1),
-	Run:  kill,
-}
-
-func init() {
-	rootCmd.AddCommand(killCmd)
-
-	// Here you will define your flags and configuration settings.
-
-	// Cobra supports Persistent Flags which will work for this command
-	// and all subcommands, e.g.:
-	// killCmd.PersistentFlags().String("foo", "", "A help for foo")
-
-	// Cobra supports local flags which will only run when this command
-	// is called directly, e.g.:
-	// killCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
-
-func kill(cmd *cobra.Command, args []string) {
-	id, score, qtype, err := parseQueryID(args[0])
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	r := rdb.NewRDB(redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	}))
-	switch qtype {
-	case "s":
-		err = r.KillScheduledTask(id, score)
-	case "r":
-		err = r.KillRetryTask(id, score)
-	default:
-		fmt.Println("invalid argument")
-		os.Exit(1)
-	}
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	fmt.Printf("Successfully killed %v\n", args[0])
-
-}
@@ -1,70 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var killallValidArgs = []string{"scheduled", "retry"}
-
-// killallCmd represents the killall command
-var killallCmd = &cobra.Command{
-	Use:   "killall [state]",
-	Short: "Kills all tasks in the specified state",
-	Long: `Killall (asynq killall) will update all tasks from the specified state to dead state.
-
-The argument should be either "scheduled" or "retry".
-
-Example: asynq killall retry -> Update all retry tasks to dead tasks`,
-	ValidArgs: killallValidArgs,
-	Args:      cobra.ExactValidArgs(1),
-	Run:       killall,
-}
-
-func init() {
-	rootCmd.AddCommand(killallCmd)
-
-	// Here you will define your flags and configuration settings.
-
-	// Cobra supports Persistent Flags which will work for this command
-	// and all subcommands, e.g.:
-	// killallCmd.PersistentFlags().String("foo", "", "A help for foo")
-
-	// Cobra supports local flags which will only run when this command
-	// is called directly, e.g.:
-	// killallCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
-
-func killall(cmd *cobra.Command, args []string) {
-	c := redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	})
-	r := rdb.NewRDB(c)
-	var n int64
-	var err error
-	switch args[0] {
-	case "scheduled":
-		n, err = r.KillAllScheduledTasks()
-	case "retry":
-		n, err = r.KillAllRetryTasks()
-	default:
-		fmt.Printf("error: `asynq killall [state]` only accepts %v as the argument.\n", killallValidArgs)
-		os.Exit(1)
-	}
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	fmt.Printf("Successfully updated %d tasks to \"dead\" state\n", n)
-}
@@ -1,229 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/rs/xid"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var lsValidArgs = []string{"enqueued", "inprogress", "scheduled", "retry", "dead"}
-
-// lsCmd represents the ls command
-var lsCmd = &cobra.Command{
-	Use:   "ls [state]",
-	Short: "Lists tasks in the specified state",
-	Long: `Ls (asynq ls) will list all tasks in the specified state in a table format.
-
-The command takes one argument which specifies the state of tasks.
-The argument value should be one of "enqueued", "inprogress", "scheduled",
-"retry", or "dead".
-
-Example:
-asynq ls dead -> Lists all tasks in dead state
-
-Enqueued tasks requires a queue name after ":"
-Example:
-asynq ls enqueued:default -> List tasks from default queue
-asynq ls enqueued:critical -> List tasks from critical queue
-`,
-	Args: cobra.ExactValidArgs(1),
-	Run:  ls,
-}
-
-// Flags
-var pageSize int
-var pageNum int
-
-func init() {
-	rootCmd.AddCommand(lsCmd)
-	lsCmd.Flags().IntVar(&pageSize, "size", 30, "page size")
-	lsCmd.Flags().IntVar(&pageNum, "page", 0, "page number - zero indexed (default 0)")
-}
-
-func ls(cmd *cobra.Command, args []string) {
-	if pageSize < 0 {
-		fmt.Println("page size cannot be negative.")
-		os.Exit(1)
-	}
-	if pageNum < 0 {
-		fmt.Println("page number cannot be negative.")
-		os.Exit(1)
-	}
-	c := redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	})
-	r := rdb.NewRDB(c)
-	parts := strings.Split(args[0], ":")
-	switch parts[0] {
-	case "enqueued":
-		if len(parts) != 2 {
-			fmt.Printf("error: Missing queue name\n`asynq ls enqueued:[queue name]`\n")
-			os.Exit(1)
-		}
-		listEnqueued(r, parts[1])
-	case "inprogress":
-		listInProgress(r)
-	case "scheduled":
-		listScheduled(r)
-	case "retry":
-		listRetry(r)
-	case "dead":
-		listDead(r)
-	default:
-		fmt.Printf("error: `asynq ls [state]`\nonly accepts %v as the argument.\n", lsValidArgs)
-		os.Exit(1)
-	}
-}
-
-// queryID returns an identifier used for "enq" command.
-// score is the zset score and queryType should be one
-// of "s", "r" or "d" (scheduled, retry, dead respectively).
-func queryID(id xid.ID, score int64, qtype string) string {
-	const format = "%v:%v:%v"
-	return fmt.Sprintf(format, qtype, score, id)
-}
-
-// parseQueryID is a reverse operation of queryID function.
-// It takes a queryID and return each part of id with proper
-// type if valid, otherwise it reports an error.
-func parseQueryID(queryID string) (id xid.ID, score int64, qtype string, err error) {
-	parts := strings.Split(queryID, ":")
-	if len(parts) != 3 {
-		return xid.NilID(), 0, "", fmt.Errorf("invalid id")
-	}
-	id, err = xid.FromString(parts[2])
-	if err != nil {
-		return xid.NilID(), 0, "", fmt.Errorf("invalid id")
-	}
-	score, err = strconv.ParseInt(parts[1], 10, 64)
-	if err != nil {
-		return xid.NilID(), 0, "", fmt.Errorf("invalid id")
-	}
-	qtype = parts[0]
-	if len(qtype) != 1 || !strings.Contains("srd", qtype) {
-		return xid.NilID(), 0, "", fmt.Errorf("invalid id")
-	}
-	return id, score, qtype, nil
-}
-
-func listEnqueued(r *rdb.RDB, qname string) {
-	tasks, err := r.ListEnqueued(qname, rdb.Pagination{Size: pageSize, Page: pageNum})
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	if len(tasks) == 0 {
-		fmt.Printf("No enqueued tasks in %q queue\n", qname)
-		return
-	}
-	cols := []string{"ID", "Type", "Payload", "Queue"}
-	printRows := func(w io.Writer, tmpl string) {
-		for _, t := range tasks {
-			fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload, t.Queue)
-		}
-	}
-	printTable(cols, printRows)
-	fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
-}
-
-func listInProgress(r *rdb.RDB) {
-	tasks, err := r.ListInProgress(rdb.Pagination{Size: pageSize, Page: pageNum})
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	if len(tasks) == 0 {
-		fmt.Println("No in-progress tasks")
-		return
-	}
-	cols := []string{"ID", "Type", "Payload"}
-	printRows := func(w io.Writer, tmpl string) {
-		for _, t := range tasks {
-			fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload)
-		}
-	}
-	printTable(cols, printRows)
-	fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
-}
-
-func listScheduled(r *rdb.RDB) {
-	tasks, err := r.ListScheduled(rdb.Pagination{Size: pageSize, Page: pageNum})
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	if len(tasks) == 0 {
-		fmt.Println("No scheduled tasks")
-		return
-	}
-	cols := []string{"ID", "Type", "Payload", "Process In", "Queue"}
-	printRows := func(w io.Writer, tmpl string) {
-		for _, t := range tasks {
-			processIn := fmt.Sprintf("%.0f seconds", t.ProcessAt.Sub(time.Now()).Seconds())
-			fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "s"), t.Type, t.Payload, processIn, t.Queue)
-		}
-	}
-	printTable(cols, printRows)
-	fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
-}
-
-func listRetry(r *rdb.RDB) {
-	tasks, err := r.ListRetry(rdb.Pagination{Size: pageSize, Page: pageNum})
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	if len(tasks) == 0 {
-		fmt.Println("No retry tasks")
-		return
-	}
-	cols := []string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Retried", "Max Retry", "Queue"}
-	printRows := func(w io.Writer, tmpl string) {
-		for _, t := range tasks {
-			var nextRetry string
-			if d := t.ProcessAt.Sub(time.Now()); d > 0 {
-				nextRetry = fmt.Sprintf("in %v", d.Round(time.Second))
-			} else {
-				nextRetry = "right now"
-			}
-			fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "r"), t.Type, t.Payload, nextRetry, t.ErrorMsg, t.Retried, t.Retry, t.Queue)
-		}
-	}
-	printTable(cols, printRows)
-	fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
-}
-
-func listDead(r *rdb.RDB) {
-	tasks, err := r.ListDead(rdb.Pagination{Size: pageSize, Page: pageNum})
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-	if len(tasks) == 0 {
-		fmt.Println("No dead tasks")
-		return
-	}
-	cols := []string{"ID", "Type", "Payload", "Last Failed", "Last Error", "Queue"}
-	printRows := func(w io.Writer, tmpl string) {
-		for _, t := range tasks {
-			fmt.Fprintf(w, tmpl, queryID(t.ID, t.Score, "d"), t.Type, t.Payload, t.LastFailedAt, t.ErrorMsg, t.Queue)
-		}
-	}
-	printTable(cols, printRows)
-	fmt.Printf("\nShowing %d tasks from page %d\n", len(tasks), pageNum)
-}
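The queryID/parseQueryID pair being deleted above encoded a task's state, zset score, and xid into a single `qtype:score:id` string. A self-contained sketch of that round trip, using the same github.com/rs/xid dependency the deleted file imported:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"

    	"github.com/rs/xid"
    )

    // queryID mirrors the deleted helper: it joins qtype ("s", "r" or "d"),
    // the zset score, and the task ID into one identifier.
    func queryID(id xid.ID, score int64, qtype string) string {
    	return fmt.Sprintf("%v:%v:%v", qtype, score, id)
    }

    // parseQueryID reverses queryID, validating each part.
    func parseQueryID(q string) (id xid.ID, score int64, qtype string, err error) {
    	parts := strings.Split(q, ":")
    	if len(parts) != 3 {
    		return xid.NilID(), 0, "", fmt.Errorf("invalid id")
    	}
    	if id, err = xid.FromString(parts[2]); err != nil {
    		return xid.NilID(), 0, "", fmt.Errorf("invalid id")
    	}
    	if score, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
    		return xid.NilID(), 0, "", fmt.Errorf("invalid id")
    	}
    	qtype = parts[0]
    	if len(qtype) != 1 || !strings.Contains("srd", qtype) {
    		return xid.NilID(), 0, "", fmt.Errorf("invalid id")
    	}
    	return id, score, qtype, nil
    }

    func main() {
    	q := queryID(xid.New(), 1575732274, "d") // e.g. "d:1575732274:bnogo8gt6toe23vhef0g"
    	id, score, qtype, err := parseQueryID(q)
    	fmt.Println(q, id, score, qtype, err)
    }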
tools/asynq/cmd/queue.go (new file, 255 lines)
@@ -0,0 +1,255 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package cmd
+
+import (
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/fatih/color"
+	"github.com/hibiken/asynq"
+	"github.com/hibiken/asynq/internal/errors"
+	"github.com/spf13/cobra"
+)
+
+const separator = "================================================="
+
+func init() {
+	rootCmd.AddCommand(queueCmd)
+	queueCmd.AddCommand(queueListCmd)
+	queueCmd.AddCommand(queueInspectCmd)
+	queueCmd.AddCommand(queueHistoryCmd)
+	queueHistoryCmd.Flags().IntP("days", "x", 10, "show data from last x days")
+
+	queueCmd.AddCommand(queuePauseCmd)
+	queueCmd.AddCommand(queueUnpauseCmd)
+	queueCmd.AddCommand(queueRemoveCmd)
+	queueRemoveCmd.Flags().BoolP("force", "f", false, "remove the queue regardless of its size")
+}
+
+var queueCmd = &cobra.Command{
+	Use:   "queue",
+	Short: "Manage queues",
+}
+
+var queueListCmd = &cobra.Command{
+	Use:   "ls",
+	Short: "List queues",
+	// TODO: Use RunE instead?
+	Run: queueList,
+}
+
+var queueInspectCmd = &cobra.Command{
+	Use:   "inspect QUEUE [QUEUE...]",
+	Short: "Display detailed information on one or more queues",
+	Args:  cobra.MinimumNArgs(1),
+	// TODO: Use RunE instead?
+	Run: queueInspect,
+}
+
+var queueHistoryCmd = &cobra.Command{
+	Use:   "history QUEUE [QUEUE...]",
+	Short: "Display historical aggregate data from one or more queues",
+	Args:  cobra.MinimumNArgs(1),
+	Run:   queueHistory,
+}
+
+var queuePauseCmd = &cobra.Command{
+	Use:   "pause QUEUE [QUEUE...]",
+	Short: "Pause one or more queues",
+	Args:  cobra.MinimumNArgs(1),
+	Run:   queuePause,
+}
+
+var queueUnpauseCmd = &cobra.Command{
+	Use:   "unpause QUEUE [QUEUE...]",
+	Short: "Unpause one or more queues",
+	Args:  cobra.MinimumNArgs(1),
+	Run:   queueUnpause,
+}
+
+var queueRemoveCmd = &cobra.Command{
+	Use:   "rm QUEUE [QUEUE...]",
+	Short: "Remove one or more queues",
+	Args:  cobra.MinimumNArgs(1),
+	Run:   queueRemove,
+}
+
+func queueList(cmd *cobra.Command, args []string) {
+	type queueInfo struct {
+		name    string
+		keyslot int64
+		nodes   []*asynq.ClusterNode
+	}
+	inspector := createInspector()
+	queues, err := inspector.Queues()
+	if err != nil {
+		fmt.Printf("error: Could not fetch list of queues: %v\n", err)
+		os.Exit(1)
+	}
+	var qs []*queueInfo
+	for _, qname := range queues {
+		q := queueInfo{name: qname}
+		if useRedisCluster {
+			keyslot, err := inspector.ClusterKeySlot(qname)
+			if err != nil {
+				fmt.Errorf("error: Could not get cluster keyslot for %q\n", qname)
+				continue
+			}
+			q.keyslot = keyslot
+			nodes, err := inspector.ClusterNodes(qname)
+			if err != nil {
+				fmt.Errorf("error: Could not get cluster nodes for %q\n", qname)
+				continue
+			}
+			q.nodes = nodes
+		}
+		qs = append(qs, &q)
+	}
+	if useRedisCluster {
+		printTable(
+			[]string{"Queue", "Cluster KeySlot", "Cluster Nodes"},
+			func(w io.Writer, tmpl string) {
+				for _, q := range qs {
+					fmt.Fprintf(w, tmpl, q.name, q.keyslot, q.nodes)
+				}
+			},
+		)
+	} else {
+		for _, q := range qs {
+			fmt.Println(q.name)
+		}
+	}
+}
+
+func queueInspect(cmd *cobra.Command, args []string) {
+	inspector := createInspector()
+	for i, qname := range args {
+		if i > 0 {
+			fmt.Printf("\n%s\n\n", separator)
+		}
+		info, err := inspector.GetQueueInfo(qname)
+		if err != nil {
+			fmt.Printf("error: %v\n", err)
+			continue
+		}
+		printQueueInfo(info)
+	}
+}
+
+func printQueueInfo(info *asynq.QueueInfo) {
+	bold := color.New(color.Bold)
+	bold.Println("Queue Info")
+	fmt.Printf("Name:   %s\n", info.Queue)
+	fmt.Printf("Size:   %d\n", info.Size)
+	fmt.Printf("Paused: %t\n\n", info.Paused)
+	bold.Println("Task Count by State")
+	printTable(
+		[]string{"active", "pending", "scheduled", "retry", "archived", "completed"},
+		func(w io.Writer, tmpl string) {
+			fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived, info.Completed)
+		},
+	)
+	fmt.Println()
+	bold.Printf("Daily Stats %s UTC\n", info.Timestamp.UTC().Format("2006-01-02"))
+	printTable(
+		[]string{"processed", "failed", "error rate"},
+		func(w io.Writer, tmpl string) {
+			var errRate string
+			if info.Processed == 0 {
+				errRate = "N/A"
+			} else {
+				errRate = fmt.Sprintf("%.2f%%", float64(info.Failed)/float64(info.Processed)*100)
+			}
+			fmt.Fprintf(w, tmpl, info.Processed, info.Failed, errRate)
+		},
+	)
+}
+
+func queueHistory(cmd *cobra.Command, args []string) {
+	days, err := cmd.Flags().GetInt("days")
+	if err != nil {
+		fmt.Printf("error: Internal error: %v\n", err)
+		os.Exit(1)
+	}
+	inspector := createInspector()
+	for i, qname := range args {
+		if i > 0 {
+			fmt.Printf("\n%s\n\n", separator)
+		}
+		fmt.Printf("Queue: %s\n\n", qname)
+		stats, err := inspector.History(qname, days)
+		if err != nil {
+			fmt.Printf("error: %v\n", err)
+			continue
+		}
+		printDailyStats(stats)
+	}
+}
+
+func printDailyStats(stats []*asynq.DailyStats) {
+	printTable(
+		[]string{"date (UTC)", "processed", "failed", "error rate"},
+		func(w io.Writer, tmpl string) {
+			for _, s := range stats {
+				var errRate string
+				if s.Processed == 0 {
+					errRate = "N/A"
+				} else {
+					errRate = fmt.Sprintf("%.2f%%", float64(s.Failed)/float64(s.Processed)*100)
+				}
+				fmt.Fprintf(w, tmpl, s.Date.Format("2006-01-02"), s.Processed, s.Failed, errRate)
+			}
+		},
+	)
+}
+
+func queuePause(cmd *cobra.Command, args []string) {
+	inspector := createInspector()
+	for _, qname := range args {
+		err := inspector.PauseQueue(qname)
+		if err != nil {
+			fmt.Println(err)
+			continue
+		}
+		fmt.Printf("Successfully paused queue %q\n", qname)
+	}
+}
+
+func queueUnpause(cmd *cobra.Command, args []string) {
+	inspector := createInspector()
+	for _, qname := range args {
+		err := inspector.UnpauseQueue(qname)
+		if err != nil {
+			fmt.Println(err)
+			continue
+		}
+		fmt.Printf("Successfully unpaused queue %q\n", qname)
+	}
+}
+
+func queueRemove(cmd *cobra.Command, args []string) {
+	// TODO: Use inspector once RemoveQueue become public API.
+	force, err := cmd.Flags().GetBool("force")
+	if err != nil {
+		fmt.Printf("error: Internal error: %v\n", err)
+		os.Exit(1)
+	}
+
+	r := createRDB()
+	for _, qname := range args {
+		err = r.RemoveQueue(qname, force)
+		if err != nil {
+			if errors.IsQueueNotEmpty(err) {
+				fmt.Printf("error: %v\nIf you are sure you want to delete it, run 'asynq queue rm --force %s'\n", err, qname)
+				continue
+			}
+			fmt.Printf("error: %v\n", err)
+			continue
+		}
+		fmt.Printf("Successfully removed queue %q\n", qname)
+	}
+}
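The queue subcommands above are thin wrappers over the library's Inspector. A minimal sketch of the same calls made directly against asynq's public API (the Redis address is a placeholder; fields shown are the ones the diff itself uses):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hibiken/asynq"
    )

    func main() {
    	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})

    	// Equivalent of `asynq queue ls`.
    	queues, err := inspector.Queues()
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, qname := range queues {
    		// Equivalent of `asynq queue inspect <queue>`.
    		info, err := inspector.GetQueueInfo(qname)
    		if err != nil {
    			log.Fatal(err)
    		}
    		fmt.Printf("%s: size=%d paused=%t\n", info.Queue, info.Size, info.Paused)
    	}
    }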
@@ -1,54 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-// rmqCmd represents the rmq command
-var rmqCmd = &cobra.Command{
-	Use:   "rmq [queue name]",
-	Short: "Removes the specified queue",
-	Long: `Rmq (asynq rmq) will remove the specified queue.
-By default, it will remove the queue only if it's empty.
-Use --force option to override this behavior.
-
-Example: asynq rmq low -> Removes "low" queue`,
-	Args: cobra.ExactValidArgs(1),
-	Run:  rmq,
-}
-
-var rmqForce bool
-
-func init() {
-	rootCmd.AddCommand(rmqCmd)
-	rmqCmd.Flags().BoolVarP(&rmqForce, "force", "f", false, "remove the queue regardless of its size")
-}
-
-func rmq(cmd *cobra.Command, args []string) {
-	c := redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	})
-	r := rdb.NewRDB(c)
-	err := r.RemoveQueue(args[0], rmqForce)
-	if err != nil {
-		if _, ok := err.(*rdb.ErrQueueNotEmpty); ok {
-			fmt.Printf("error: %v\nIf you are sure you want to delete it, run 'asynq rmq --force %s'\n", err, args[0])
-			os.Exit(1)
-		}
-		fmt.Printf("error: %v", err)
-		os.Exit(1)
-	}
-	fmt.Printf("Successfully removed queue %q\n", args[0])
-}
@@ -5,12 +5,19 @@
 package cmd
 
 import (
+	"crypto/tls"
 	"fmt"
 	"io"
 	"os"
 	"strings"
 	"text/tabwriter"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/go-redis/redis/v8"
+	"github.com/hibiken/asynq"
+	"github.com/hibiken/asynq/internal/base"
+	"github.com/hibiken/asynq/internal/rdb"
 	"github.com/spf13/cobra"
 
 	homedir "github.com/mitchellh/go-homedir"
@@ -19,16 +26,33 @@ import (
 
 var cfgFile string
 
-// Flags
-var uri string
-var db int
-var password string
+// Global flag variables
+var (
+	uri      string
+	db       int
+	password string
+
+	useRedisCluster bool
+	clusterAddrs    string
+	tlsServerName   string
+)
 
 // rootCmd represents the base command when called without any subcommands
 var rootCmd = &cobra.Command{
 	Use:   "asynq",
 	Short: "A monitoring tool for asynq queues",
 	Long:  `Asynq is a monitoring CLI to inspect tasks and queues managed by asynq.`,
+	Version: base.Version,
+}
+
+var versionOutput = fmt.Sprintf("asynq version %s\n", base.Version)
+
+var versionCmd = &cobra.Command{
+	Use:    "version",
+	Hidden: true,
+	Run: func(cmd *cobra.Command, args []string) {
+		fmt.Print(versionOutput)
+	},
 }
 
 // Execute adds all child commands to the root command and sets flags appropriately.
@@ -43,13 +67,26 @@ func Execute() {
 func init() {
 	cobra.OnInitialize(initConfig)
+
+	rootCmd.AddCommand(versionCmd)
+	rootCmd.SetVersionTemplate(versionOutput)
+
 	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file to set flag default values (default is $HOME/.asynq.yaml)")
 	rootCmd.PersistentFlags().StringVarP(&uri, "uri", "u", "127.0.0.1:6379", "redis server URI")
 	rootCmd.PersistentFlags().IntVarP(&db, "db", "n", 0, "redis database number (default is 0)")
 	rootCmd.PersistentFlags().StringVarP(&password, "password", "p", "", "password to use when connecting to redis server")
+	rootCmd.PersistentFlags().BoolVar(&useRedisCluster, "cluster", false, "connect to redis cluster")
+	rootCmd.PersistentFlags().StringVar(&clusterAddrs, "cluster_addrs",
+		"127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003,127.0.0.1:7004,127.0.0.1:7005",
+		"list of comma-separated redis server addresses")
+	rootCmd.PersistentFlags().StringVar(&tlsServerName, "tls_server",
+		"", "server name for TLS validation")
+	// Bind flags with config.
 	viper.BindPFlag("uri", rootCmd.PersistentFlags().Lookup("uri"))
 	viper.BindPFlag("db", rootCmd.PersistentFlags().Lookup("db"))
 	viper.BindPFlag("password", rootCmd.PersistentFlags().Lookup("password"))
+	viper.BindPFlag("cluster", rootCmd.PersistentFlags().Lookup("cluster"))
+	viper.BindPFlag("cluster_addrs", rootCmd.PersistentFlags().Lookup("cluster_addrs"))
+	viper.BindPFlag("tls_server", rootCmd.PersistentFlags().Lookup("tls_server"))
 }
 
 // initConfig reads in config file and ENV variables if set.
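A hedged sketch of what the BindPFlag calls above buy us: once a flag is bound, viper resolves a key from, in order of precedence, the explicit flag value, then the config-file value, then the flag default.

package cmd

import (
	"fmt"

	"github.com/spf13/viper"
)

func exampleResolvedURI() {
	// With --uri unset and a config file containing `uri: "10.0.0.5:6379"`
	// (a hypothetical config entry), this prints the config value instead
	// of the flag default "127.0.0.1:6379".
	fmt.Println(viper.GetString("uri"))
}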
@@ -78,6 +115,57 @@ func initConfig() {
 	}
 }
+
+// createRDB creates a RDB instance using flag values and returns it.
+func createRDB() *rdb.RDB {
+	var c redis.UniversalClient
+	if useRedisCluster {
+		addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
+		c = redis.NewClusterClient(&redis.ClusterOptions{
+			Addrs:     addrs,
+			Password:  viper.GetString("password"),
+			TLSConfig: getTLSConfig(),
+		})
+	} else {
+		c = redis.NewClient(&redis.Options{
+			Addr:      viper.GetString("uri"),
+			DB:        viper.GetInt("db"),
+			Password:  viper.GetString("password"),
+			TLSConfig: getTLSConfig(),
+		})
+	}
+	return rdb.NewRDB(c)
+}
+
+// createInspector creates an Inspector instance using flag values and returns it.
+func createInspector() *asynq.Inspector {
+	return asynq.NewInspector(getRedisConnOpt())
+}
+
+func getRedisConnOpt() asynq.RedisConnOpt {
+	if useRedisCluster {
+		addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
+		return asynq.RedisClusterClientOpt{
+			Addrs:     addrs,
+			Password:  viper.GetString("password"),
+			TLSConfig: getTLSConfig(),
+		}
+	}
+	return asynq.RedisClientOpt{
+		Addr:      viper.GetString("uri"),
+		DB:        viper.GetInt("db"),
+		Password:  viper.GetString("password"),
+		TLSConfig: getTLSConfig(),
+	}
+}
+
+func getTLSConfig() *tls.Config {
+	tlsServer := viper.GetString("tls_server")
+	if tlsServer == "" {
+		return nil
+	}
+	return &tls.Config{ServerName: tlsServer}
+}
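A hedged sketch showing that the asynq.RedisConnOpt built by getRedisConnOpt above is the same connection-option type accepted by asynq.NewClient and asynq.NewInspector; the cluster addresses are illustrative:

package main

import "github.com/hibiken/asynq"

func main() {
	// One RedisConnOpt value can back both the enqueueing client and the
	// inspector, which is what lets the CLI share its flag parsing.
	var opt asynq.RedisConnOpt = asynq.RedisClusterClientOpt{
		Addrs:    []string{"127.0.0.1:7000", "127.0.0.1:7001"}, // example addresses
		Password: "",
	}
	client := asynq.NewClient(opt)
	defer client.Close()
	inspector := asynq.NewInspector(opt)
	_ = inspector
}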
 // printTable is a helper function to print data in table format.
 //
 // cols is a list of headers and printRow specifies how to print rows.
@@ -110,3 +198,28 @@ func printTable(cols []string, printRows func(w io.Writer, tmpl string)) {
 	printRows(tw, format)
 	tw.Flush()
 }
+
+// sprintBytes returns a string representation of the given byte slice if data is printable.
+// If data is not printable, it returns a string describing it is not printable.
+func sprintBytes(payload []byte) string {
+	if !isPrintable(payload) {
+		return "non-printable bytes"
+	}
+	return string(payload)
+}
+
+func isPrintable(data []byte) bool {
+	if !utf8.Valid(data) {
+		return false
+	}
+	isAllSpace := true
+	for _, r := range string(data) {
+		if !unicode.IsPrint(r) {
+			return false
+		}
+		if !unicode.IsSpace(r) {
+			isAllSpace = false
+		}
+	}
+	return !isAllSpace
+}
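A sketch of the sprintBytes helper above in use: valid, printable UTF-8 payloads are echoed back, and anything else is summarized; the payloads are made up for illustration.

package cmd

import "fmt"

func exampleSprintBytes() {
	// A printable JSON payload comes back unchanged.
	fmt.Println(sprintBytes([]byte(`{"user_id": 42}`))) // {"user_id": 42}
	// Invalid UTF-8 is summarized instead of dumped raw.
	fmt.Println(sprintBytes([]byte{0xff, 0xfe})) // non-printable bytes
}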
@@ -12,42 +12,39 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
 	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
 )
 
-// serversCmd represents the servers command
-var serversCmd = &cobra.Command{
-	Use:   "servers",
-	Short: "Shows all running worker servers",
-	Long: `Servers (asynq servers) will show all running worker servers
-pulling tasks from the specified redis instance.
+func init() {
+	rootCmd.AddCommand(serverCmd)
+	serverCmd.AddCommand(serverListCmd)
+}
+
+var serverCmd = &cobra.Command{
+	Use:   "server",
+	Short: "Manage servers",
+}
+
+var serverListCmd = &cobra.Command{
+	Use:   "ls",
+	Short: "List servers",
+	Long: `Server list (asynq server ls) shows all running worker servers
+pulling tasks from the given redis instance.
 
 The command shows the following for each server:
 * Host and PID of the process in which the server is running
 * Number of active workers out of worker pool
 * Queue configuration
-* State of the worker server ("running" | "quiet")
+* State of the worker server ("active" | "stopped")
 * Time the server was started
 
-A "running" server is pulling tasks from queues and processing them.
-A "quiet" server is no longer pulling new tasks from queues`,
-	Args: cobra.NoArgs,
-	Run:  servers,
+An "active" server is pulling tasks from queues and processing them.
+A "stopped" server is no longer pulling new tasks from queues`,
+	Run: serverList,
 }
 
-func init() {
-	rootCmd.AddCommand(serversCmd)
-}
-
-func servers(cmd *cobra.Command, args []string) {
-	r := rdb.NewRDB(redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	}))
-
+func serverList(cmd *cobra.Command, args []string) {
+	r := createRDB()
 	servers, err := r.ListServers()
 	if err != nil {
@@ -81,12 +78,6 @@ func servers(cmd *cobra.Command, args []string) {
 	printTable(cols, printRows)
 }
 
-// timeAgo takes a time and returns a string of the format "<duration> ago".
-func timeAgo(since time.Time) string {
-	d := time.Since(since).Round(time.Second)
-	return fmt.Sprintf("%v ago", d)
-}
-
 func formatQueues(qmap map[string]int) string {
 	// sort queues by priority and name
 	type queue struct {
@@ -116,3 +107,9 @@ func formatQueues(qmap map[string]int) string {
 	}
 	return b.String()
 }
+
+// timeAgo takes a time and returns a string of the format "<duration> ago".
+func timeAgo(since time.Time) string {
+	d := time.Since(since).Round(time.Second)
+	return fmt.Sprintf("%v ago", d)
+}
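A small sketch of timeAgo in use: a server started 90 seconds ago renders as "1m30s ago", since the duration is rounded to the second before formatting.

package cmd

import (
	"fmt"
	"time"
)

func exampleTimeAgo() {
	started := time.Now().Add(-90 * time.Second) // illustrative start time
	fmt.Println(timeAgo(started))                // 1m30s ago
}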
@@ -5,24 +5,27 @@
 package cmd
 
 import (
+	"encoding/json"
 	"fmt"
+	"io"
+	"math"
 	"os"
-	"sort"
 	"strconv"
 	"strings"
 	"text/tabwriter"
+	"time"
+	"unicode/utf8"
 
-	"github.com/go-redis/redis/v7"
+	"github.com/fatih/color"
 	"github.com/hibiken/asynq/internal/rdb"
 	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
 )
 
 // statsCmd represents the stats command
 var statsCmd = &cobra.Command{
 	Use:   "stats",
 	Short: "Shows current state of the tasks and queues",
-	Long: `Stats (asynqmon stats) will show the overview of tasks and queues at that instant.
+	Long: `Stats (asynq stats) will show the overview of tasks and queues at that instant.
 
 Specifically, the command shows the following:
 * Number of tasks in each state
@@ -38,8 +41,11 @@ Example: watch -n 3 asynq stats -> Shows current state of tasks every three seco
 	Run: stats,
 }
 
+var jsonFlag bool
+
 func init() {
 	rootCmd.AddCommand(statsCmd)
+	statsCmd.Flags().BoolVar(&jsonFlag, "json", false, "Output stats in JSON format.")
 
 	// Here you will define your flags and configuration settings.
 
@@ -52,72 +58,168 @@ func init() {
 	// statsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
 }
 
+type AggregateStats struct {
+	Active    int       `json:"active"`
+	Pending   int       `json:"pending"`
+	Scheduled int       `json:"scheduled"`
+	Retry     int       `json:"retry"`
+	Archived  int       `json:"archived"`
+	Completed int       `json:"completed"`
+	Processed int       `json:"processed"`
+	Failed    int       `json:"failed"`
+	Timestamp time.Time `json:"timestamp"`
+}
+
+type FullStats struct {
+	Aggregate  AggregateStats    `json:"aggregate"`
+	QueueStats []*rdb.Stats      `json:"queues"`
+	RedisInfo  map[string]string `json:"redis"`
+}
+
 func stats(cmd *cobra.Command, args []string) {
-	c := redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	})
-	r := rdb.NewRDB(c)
-	stats, err := r.CurrentStats()
+	r := createRDB()
+
+	queues, err := r.AllQueues()
 	if err != nil {
 		fmt.Println(err)
 		os.Exit(1)
 	}
-	info, err := r.RedisInfo()
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
+
+	var aggStats AggregateStats
+	var stats []*rdb.Stats
+	for _, qname := range queues {
+		s, err := r.CurrentStats(qname)
+		if err != nil {
+			fmt.Println(err)
+			os.Exit(1)
+		}
+		aggStats.Active += s.Active
+		aggStats.Pending += s.Pending
+		aggStats.Scheduled += s.Scheduled
+		aggStats.Retry += s.Retry
+		aggStats.Archived += s.Archived
+		aggStats.Completed += s.Completed
+		aggStats.Processed += s.Processed
+		aggStats.Failed += s.Failed
+		aggStats.Timestamp = s.Timestamp
+		stats = append(stats, s)
+	}
+	var info map[string]string
+	if useRedisCluster {
+		info, err = r.RedisClusterInfo()
+	} else {
+		info, err = r.RedisInfo()
+	}
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	if jsonFlag {
+		statsJSON, err := json.Marshal(FullStats{
+			Aggregate:  aggStats,
+			QueueStats: stats,
+			RedisInfo:  info,
+		})
+		if err != nil {
+			fmt.Println(err)
+			os.Exit(1)
+		}
+		fmt.Println(string(statsJSON))
+		return
+	}
+
-	fmt.Println("STATES")
-	printStates(stats)
+	bold := color.New(color.Bold)
+	bold.Println("Task Count by State")
+	printStatsByState(&aggStats)
 	fmt.Println()
 
-	fmt.Println("QUEUES")
-	printQueues(stats.Queues)
+	bold.Println("Task Count by Queue")
+	printStatsByQueue(stats)
 	fmt.Println()
 
-	fmt.Printf("STATS FOR %s UTC\n", stats.Timestamp.UTC().Format("2006-01-02"))
-	printStats(stats)
+	bold.Printf("Daily Stats %s UTC\n", aggStats.Timestamp.UTC().Format("2006-01-02"))
+	printSuccessFailureStats(&aggStats)
 	fmt.Println()
 
-	fmt.Println("REDIS INFO")
-	printInfo(info)
+	if useRedisCluster {
+		bold.Println("Redis Cluster Info")
+		printClusterInfo(info)
+	} else {
+		bold.Println("Redis Info")
+		printInfo(info)
+	}
 	fmt.Println()
 }
 
-func printStates(s *rdb.Stats) {
-	format := strings.Repeat("%v\t", 5) + "\n"
+func printStatsByState(s *AggregateStats) {
+	format := strings.Repeat("%v\t", 6) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
-	fmt.Fprintf(tw, format, "InProgress", "Enqueued", "Scheduled", "Retry", "Dead")
-	fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----")
-	fmt.Fprintf(tw, format, s.InProgress, s.Enqueued, s.Scheduled, s.Retry, s.Dead)
+	fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived", "completed")
+	width := maxInt(9 /* defaultWidth */, maxWidthOf(s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)) // length of widest column
+	sep := strings.Repeat("-", width)
+	fmt.Fprintf(tw, format, sep, sep, sep, sep, sep, sep)
+	fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)
 	tw.Flush()
 }
 
-func printQueues(queues map[string]int) {
-	var qnames, seps, counts []string
-	for q := range queues {
-		qnames = append(qnames, strings.Title(q))
-	}
-	sort.Strings(qnames) // sort for stable order
-	for _, q := range qnames {
-		seps = append(seps, strings.Repeat("-", len(q)))
-		counts = append(counts, strconv.Itoa(queues[strings.ToLower(q)]))
-	}
-	format := strings.Repeat("%v\t", len(qnames)) + "\n"
+// numDigits returns the number of digits in n.
+func numDigits(n int) int {
+	return len(strconv.Itoa(n))
+}
+
+// maxWidthOf returns the max number of digits among the provided vals.
+func maxWidthOf(vals ...int) int {
+	max := 0
+	for _, v := range vals {
+		if vw := numDigits(v); vw > max {
+			max = vw
+		}
+	}
+	return max
+}
+
+func maxInt(a, b int) int {
+	return int(math.Max(float64(a), float64(b)))
+}
+
+func printStatsByQueue(stats []*rdb.Stats) {
+	var headers, seps, counts []string
+	maxHeaderWidth := 0
+	for _, s := range stats {
+		title := queueTitle(s)
+		headers = append(headers, title)
+		if w := utf8.RuneCountInString(title); w > maxHeaderWidth {
+			maxHeaderWidth = w
+		}
+		counts = append(counts, strconv.Itoa(s.Size))
+	}
+	for i := 0; i < len(headers); i++ {
+		seps = append(seps, strings.Repeat("-", maxHeaderWidth))
+	}
+	format := strings.Repeat("%v\t", len(headers)) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
-	fmt.Fprintf(tw, format, toInterfaceSlice(qnames)...)
+	fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)
 	fmt.Fprintf(tw, format, toInterfaceSlice(seps)...)
 	fmt.Fprintf(tw, format, toInterfaceSlice(counts)...)
 	tw.Flush()
 }
 
-func printStats(s *rdb.Stats) {
+func queueTitle(s *rdb.Stats) string {
+	var b strings.Builder
+	b.WriteString(s.Queue)
+	if s.Paused {
+		b.WriteString(" (paused)")
+	}
+	return b.String()
+}
+
+func printSuccessFailureStats(s *AggregateStats) {
 	format := strings.Repeat("%v\t", 3) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
-	fmt.Fprintf(tw, format, "Processed", "Failed", "Error Rate")
+	fmt.Fprintf(tw, format, "processed", "failed", "error rate")
 	fmt.Fprintf(tw, format, "---------", "------", "----------")
 	var errrate string
 	if s.Processed == 0 {
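A hedged sketch of the JSON emitted by `asynq stats --json`, given the FullStats and AggregateStats structs above; the field values are illustrative, not real output.

package cmd

import (
	"encoding/json"
	"fmt"
	"time"
)

func exampleStatsJSON() {
	full := FullStats{
		Aggregate: AggregateStats{
			Active:    3,    // made-up counts for illustration
			Pending:   12,
			Processed: 1500,
			Failed:    2,
			Timestamp: time.Now(),
		},
		RedisInfo: map[string]string{"redis_version": "6.2.6"},
	}
	out, _ := json.Marshal(full)
	// Prints something like:
	// {"aggregate":{"active":3,...},"queues":null,"redis":{"redis_version":"6.2.6"}}
	fmt.Println(string(out))
}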
@@ -132,7 +234,7 @@ func printStats(s *rdb.Stats) {
 func printInfo(info map[string]string) {
 	format := strings.Repeat("%v\t", 5) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
-	fmt.Fprintf(tw, format, "Version", "Uptime", "Connections", "Memory Usage", "Peak Memory Usage")
+	fmt.Fprintf(tw, format, "version", "uptime", "connections", "memory usage", "peak memory usage")
 	fmt.Fprintf(tw, format, "-------", "------", "-----------", "------------", "-----------------")
 	fmt.Fprintf(tw, format,
 		info["redis_version"],
@@ -144,6 +246,19 @@ func printInfo(info map[string]string) {
 	tw.Flush()
 }
+
+func printClusterInfo(info map[string]string) {
+	printTable(
+		[]string{"State", "Known Nodes", "Cluster Size"},
+		func(w io.Writer, tmpl string) {
+			fmt.Fprintf(w, tmpl,
+				strings.ToUpper(info["cluster_state"]),
+				info["cluster_known_nodes"],
+				info["cluster_size"],
+			)
+		},
+	)
+}
+
 func toInterfaceSlice(strs []string) []interface{} {
 	var res []interface{}
 	for _, s := range strs {
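A short sketch of why toInterfaceSlice exists: fmt.Fprintf takes ...interface{}, so a []string must be converted before it can be spread into the call; the header names are illustrative.

package cmd

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

func exampleHeaderRow() {
	headers := []string{"ID", "Type", "Payload"}
	format := strings.Repeat("%v\t", len(headers)) + "\n"
	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
	// toInterfaceSlice converts []string to []interface{} so it can be
	// spread into the variadic Fprintf call.
	fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)
	tw.Flush()
}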
566 tools/asynq/cmd/task.go Normal file
@@ -0,0 +1,566 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package cmd

import (
	"fmt"
	"io"
	"os"
	"time"

	"github.com/fatih/color"
	"github.com/hibiken/asynq"
	"github.com/spf13/cobra"
)

func init() {
	rootCmd.AddCommand(taskCmd)
	taskCmd.AddCommand(taskListCmd)
	taskListCmd.Flags().StringP("queue", "q", "", "queue to inspect")
	taskListCmd.Flags().StringP("state", "s", "", "state of the tasks to inspect")
	taskListCmd.Flags().Int("page", 1, "page number")
	taskListCmd.Flags().Int("size", 30, "page size")
	taskListCmd.MarkFlagRequired("queue")
	taskListCmd.MarkFlagRequired("state")

	taskCmd.AddCommand(taskCancelCmd)

	taskCmd.AddCommand(taskInspectCmd)
	taskInspectCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
	taskInspectCmd.Flags().StringP("id", "i", "", "id of the task")
	taskInspectCmd.MarkFlagRequired("queue")
	taskInspectCmd.MarkFlagRequired("id")

	taskCmd.AddCommand(taskArchiveCmd)
	taskArchiveCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
	taskArchiveCmd.Flags().StringP("id", "i", "", "id of the task")
	taskArchiveCmd.MarkFlagRequired("queue")
	taskArchiveCmd.MarkFlagRequired("id")

	taskCmd.AddCommand(taskDeleteCmd)
	taskDeleteCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
	taskDeleteCmd.Flags().StringP("id", "i", "", "id of the task")
	taskDeleteCmd.MarkFlagRequired("queue")
	taskDeleteCmd.MarkFlagRequired("id")

	taskCmd.AddCommand(taskRunCmd)
	taskRunCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
	taskRunCmd.Flags().StringP("id", "i", "", "id of the task")
	taskRunCmd.MarkFlagRequired("queue")
	taskRunCmd.MarkFlagRequired("id")

	taskCmd.AddCommand(taskArchiveAllCmd)
	taskArchiveAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
	taskArchiveAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
	taskArchiveAllCmd.MarkFlagRequired("queue")
	taskArchiveAllCmd.MarkFlagRequired("state")

	taskCmd.AddCommand(taskDeleteAllCmd)
	taskDeleteAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
	taskDeleteAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
	taskDeleteAllCmd.MarkFlagRequired("queue")
	taskDeleteAllCmd.MarkFlagRequired("state")

	taskCmd.AddCommand(taskRunAllCmd)
	taskRunAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
	taskRunAllCmd.Flags().StringP("state", "s", "", "state of the tasks")
	taskRunAllCmd.MarkFlagRequired("queue")
	taskRunAllCmd.MarkFlagRequired("state")
}

var taskCmd = &cobra.Command{
	Use:   "task",
	Short: "Manage tasks",
}

var taskListCmd = &cobra.Command{
	Use:   "ls --queue=QUEUE --state=STATE",
	Short: "List tasks",
	Long: `List tasks of the given state from the specified queue.

The value for the state flag should be one of:
- active
- pending
- scheduled
- retry
- archived
- completed

List operation paginates the result set.
By default, the command fetches the first 30 tasks.
Use --page and --size flags to specify the page number and size.

Example:
To list pending tasks from "default" queue, run
  asynq task ls --queue=default --state=pending

To list the tasks from the second page, run
  asynq task ls --queue=default --state=pending --page=1`,
	Run: taskList,
}

var taskInspectCmd = &cobra.Command{
	Use:   "inspect --queue=QUEUE --id=TASK_ID",
	Short: "Display detailed information on the specified task",
	Args:  cobra.NoArgs,
	Run:   taskInspect,
}

var taskCancelCmd = &cobra.Command{
	Use:   "cancel TASK_ID [TASK_ID...]",
	Short: "Cancel one or more active tasks",
	Args:  cobra.MinimumNArgs(1),
	Run:   taskCancel,
}

var taskArchiveCmd = &cobra.Command{
	Use:   "archive --queue=QUEUE --id=TASK_ID",
	Short: "Archive a task with the given id",
	Args:  cobra.NoArgs,
	Run:   taskArchive,
}

var taskDeleteCmd = &cobra.Command{
	Use:   "delete --queue=QUEUE --id=TASK_ID",
	Short: "Delete a task with the given id",
	Args:  cobra.NoArgs,
	Run:   taskDelete,
}

var taskRunCmd = &cobra.Command{
	Use:   "run --queue=QUEUE --id=TASK_ID",
	Short: "Run a task with the given id",
	Args:  cobra.NoArgs,
	Run:   taskRun,
}

var taskArchiveAllCmd = &cobra.Command{
	Use:   "archiveall --queue=QUEUE --state=STATE",
	Short: "Archive all tasks in the given state",
	Args:  cobra.NoArgs,
	Run:   taskArchiveAll,
}

var taskDeleteAllCmd = &cobra.Command{
	Use:   "deleteall --queue=QUEUE --state=STATE",
	Short: "Delete all tasks in the given state",
	Args:  cobra.NoArgs,
	Run:   taskDeleteAll,
}

var taskRunAllCmd = &cobra.Command{
	Use:   "runall --queue=QUEUE --state=STATE",
	Short: "Run all tasks in the given state",
	Args:  cobra.NoArgs,
	Run:   taskRunAll,
}

func taskList(cmd *cobra.Command, args []string) {
	qname, err := cmd.Flags().GetString("queue")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	state, err := cmd.Flags().GetString("state")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	pageNum, err := cmd.Flags().GetInt("page")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	pageSize, err := cmd.Flags().GetInt("size")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	switch state {
	case "active":
		listActiveTasks(qname, pageNum, pageSize)
	case "pending":
		listPendingTasks(qname, pageNum, pageSize)
	case "scheduled":
		listScheduledTasks(qname, pageNum, pageSize)
	case "retry":
		listRetryTasks(qname, pageNum, pageSize)
	case "archived":
		listArchivedTasks(qname, pageNum, pageSize)
	case "completed":
		listCompletedTasks(qname, pageNum, pageSize)
	default:
		fmt.Printf("error: state=%q is not supported\n", state)
		os.Exit(1)
	}
}

func listActiveTasks(qname string, pageNum, pageSize int) {
	i := createInspector()
	tasks, err := i.ListActiveTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(tasks) == 0 {
		fmt.Printf("No active tasks in %q queue\n", qname)
		return
	}
	printTable(
		[]string{"ID", "Type", "Payload"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
			}
		},
	)
}

func listPendingTasks(qname string, pageNum, pageSize int) {
	i := createInspector()
	tasks, err := i.ListPendingTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(tasks) == 0 {
		fmt.Printf("No pending tasks in %q queue\n", qname)
		return
	}
	printTable(
		[]string{"ID", "Type", "Payload"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
			}
		},
	)
}

func listScheduledTasks(qname string, pageNum, pageSize int) {
	i := createInspector()
	tasks, err := i.ListScheduledTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(tasks) == 0 {
		fmt.Printf("No scheduled tasks in %q queue\n", qname)
		return
	}
	printTable(
		[]string{"ID", "Type", "Payload", "Process In"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt))
			}
		},
	)
}

// formatProcessAt formats next process at time to human friendly string.
// If processAt time is in the past, returns "right now".
// If processAt time is in the future, returns "in xxx" where xxx is the duration from now.
func formatProcessAt(processAt time.Time) string {
	d := processAt.Sub(time.Now())
	if d < 0 {
		return "right now"
	}
	return fmt.Sprintf("in %v", d.Round(time.Second))
}

func listRetryTasks(qname string, pageNum, pageSize int) {
	i := createInspector()
	tasks, err := i.ListRetryTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(tasks) == 0 {
		fmt.Printf("No retry tasks in %q queue\n", qname)
		return
	}
	printTable(
		[]string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Last Failed", "Retried", "Max Retry"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt),
					t.LastErr, formatPastTime(t.LastFailedAt), t.Retried, t.MaxRetry)
			}
		},
	)
}

func listArchivedTasks(qname string, pageNum, pageSize int) {
	i := createInspector()
	tasks, err := i.ListArchivedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(tasks) == 0 {
		fmt.Printf("No archived tasks in %q queue\n", qname)
		return
	}
	printTable(
		[]string{"ID", "Type", "Payload", "Last Failed", "Last Error"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.LastFailedAt), t.LastErr)
			}
		})
}

func listCompletedTasks(qname string, pageNum, pageSize int) {
	i := createInspector()
	tasks, err := i.ListCompletedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(tasks) == 0 {
		fmt.Printf("No completed tasks in %q queue\n", qname)
		return
	}
	printTable(
		[]string{"ID", "Type", "Payload", "CompletedAt", "Result"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.CompletedAt), sprintBytes(t.Result))
			}
		})
}

func taskCancel(cmd *cobra.Command, args []string) {
	i := createInspector()
	for _, id := range args {
		if err := i.CancelProcessing(id); err != nil {
			fmt.Printf("error: could not send cancelation signal: %v\n", err)
			continue
		}
		fmt.Printf("Sent cancelation signal for task %s\n", id)
	}
}

func taskInspect(cmd *cobra.Command, args []string) {
	qname, err := cmd.Flags().GetString("queue")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	id, err := cmd.Flags().GetString("id")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}

	i := createInspector()
	info, err := i.GetTaskInfo(qname, id)
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	printTaskInfo(info)
}

func printTaskInfo(info *asynq.TaskInfo) {
	bold := color.New(color.Bold)
	bold.Println("Task Info")
	fmt.Printf("Queue: %s\n", info.Queue)
	fmt.Printf("ID: %s\n", info.ID)
	fmt.Printf("Type: %s\n", info.Type)
	fmt.Printf("State: %v\n", info.State)
	fmt.Printf("Retried: %d/%d\n", info.Retried, info.MaxRetry)
	fmt.Println()
	fmt.Printf("Next process time: %s\n", formatNextProcessAt(info.NextProcessAt))
	if len(info.LastErr) != 0 {
		fmt.Println()
		bold.Println("Last Failure")
		fmt.Printf("Failed at: %s\n", formatPastTime(info.LastFailedAt))
		fmt.Printf("Error message: %s\n", info.LastErr)
	}
}

func formatNextProcessAt(processAt time.Time) string {
	if processAt.IsZero() || processAt.Unix() == 0 {
		return "n/a"
	}
	if processAt.Before(time.Now()) {
		return "now"
	}
	return fmt.Sprintf("%s (in %v)", processAt.Format(time.UnixDate), processAt.Sub(time.Now()).Round(time.Second))
}

// formatPastTime takes t which is time in the past and returns a user-friendly string.
func formatPastTime(t time.Time) string {
	if t.IsZero() || t.Unix() == 0 {
		return ""
	}
	return t.Format(time.UnixDate)
}

func taskArchive(cmd *cobra.Command, args []string) {
	qname, err := cmd.Flags().GetString("queue")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	id, err := cmd.Flags().GetString("id")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}

	i := createInspector()
	err = i.ArchiveTask(qname, id)
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("task archived")
}

func taskDelete(cmd *cobra.Command, args []string) {
	qname, err := cmd.Flags().GetString("queue")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	id, err := cmd.Flags().GetString("id")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}

	i := createInspector()
	err = i.DeleteTask(qname, id)
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("task deleted")
}

func taskRun(cmd *cobra.Command, args []string) {
	qname, err := cmd.Flags().GetString("queue")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	id, err := cmd.Flags().GetString("id")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}

	i := createInspector()
	err = i.RunTask(qname, id)
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("task is now pending")
}

func taskArchiveAll(cmd *cobra.Command, args []string) {
	qname, err := cmd.Flags().GetString("queue")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	state, err := cmd.Flags().GetString("state")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}

	i := createInspector()
	var n int
	switch state {
	case "pending":
		n, err = i.ArchiveAllPendingTasks(qname)
	case "scheduled":
		n, err = i.ArchiveAllScheduledTasks(qname)
	case "retry":
		n, err = i.ArchiveAllRetryTasks(qname)
	default:
		fmt.Printf("error: unsupported state %q\n", state)
		os.Exit(1)
	}
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("%d tasks archived\n", n)
}

func taskDeleteAll(cmd *cobra.Command, args []string) {
	qname, err := cmd.Flags().GetString("queue")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	state, err := cmd.Flags().GetString("state")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}

	i := createInspector()
	var n int
	switch state {
	case "pending":
		n, err = i.DeleteAllPendingTasks(qname)
	case "scheduled":
		n, err = i.DeleteAllScheduledTasks(qname)
	case "retry":
		n, err = i.DeleteAllRetryTasks(qname)
	case "archived":
		n, err = i.DeleteAllArchivedTasks(qname)
	case "completed":
		n, err = i.DeleteAllCompletedTasks(qname)
	default:
		fmt.Printf("error: unsupported state %q\n", state)
		os.Exit(1)
	}
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("%d tasks deleted\n", n)
}

func taskRunAll(cmd *cobra.Command, args []string) {
	qname, err := cmd.Flags().GetString("queue")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	state, err := cmd.Flags().GetString("state")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}

	i := createInspector()
	var n int
	switch state {
	case "scheduled":
		n, err = i.RunAllScheduledTasks(qname)
	case "retry":
		n, err = i.RunAllRetryTasks(qname)
	case "archived":
		n, err = i.RunAllArchivedTasks(qname)
	default:
		fmt.Printf("error: unsupported state %q\n", state)
		os.Exit(1)
	}
	if err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("%d tasks are now pending\n", n)
}
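A minimal sketch (not part of the diff) of the Inspector pagination used by `asynq task ls`: the --page and --size flags map directly to the asynq.Page and asynq.PageSize options; the queue name "default" and the Redis address are illustrative.

package main

import (
	"fmt"

	"github.com/hibiken/asynq"
)

func main() {
	i := asynq.NewInspector(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})
	// Fetch one page of 30 pending tasks, mirroring the CLI defaults.
	tasks, err := i.ListPendingTasks("default", asynq.PageSize(30), asynq.Page(1))
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, t := range tasks {
		fmt.Println(t.ID, t.Type)
	}
}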
@@ -1,75 +0,0 @@
-// Copyright 2020 Kentaro Hibino. All rights reserved.
-// Use of this source code is governed by a MIT license
-// that can be found in the LICENSE file.
-
-package cmd
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"sort"
-
-	"github.com/go-redis/redis/v7"
-	"github.com/hibiken/asynq/internal/rdb"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-// workersCmd represents the workers command
-var workersCmd = &cobra.Command{
-	Use:   "workers",
-	Short: "Shows all running workers information",
-	Long: `Workers (asynq workers) will show all running workers information.
-
-The command shows the following for each worker:
-* Process in which the worker is running
-* ID of the task worker is processing
-* Type of the task worker is processing
-* Payload of the task worker is processing
-* Queue that the task was pulled from.
-* Time the worker started processing the task`,
-	Args: cobra.NoArgs,
-	Run:  workers,
-}
-
-func init() {
-	rootCmd.AddCommand(workersCmd)
-}
-
-func workers(cmd *cobra.Command, args []string) {
-	r := rdb.NewRDB(redis.NewClient(&redis.Options{
-		Addr:     viper.GetString("uri"),
-		DB:       viper.GetInt("db"),
-		Password: viper.GetString("password"),
-	}))
-
-	workers, err := r.ListWorkers()
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-
-	if len(workers) == 0 {
-		fmt.Println("No workers")
-		return
-	}
-
-	// sort by started timestamp or ID.
-	sort.Slice(workers, func(i, j int) bool {
-		x, y := workers[i], workers[j]
-		if x.Started != y.Started {
-			return x.Started.Before(y.Started)
-		}
-		return x.ID.String() < y.ID.String()
-	})
-
-	cols := []string{"Process", "ID", "Type", "Payload", "Queue", "Started"}
-	printRows := func(w io.Writer, tmpl string) {
-		for _, wk := range workers {
-			fmt.Fprintf(w, tmpl,
-				fmt.Sprintf("%s:%d", wk.Host, wk.PID), wk.ID, wk.Type, wk.Payload, wk.Queue, timeAgo(wk.Started))
-		}
-	}
-	printTable(cols, printRows)
-}
15 tools/go.mod
@@ -3,12 +3,13 @@ module github.com/hibiken/asynq/tools
 go 1.13
 
 require (
-	github.com/go-redis/redis/v7 v7.2.0
-	github.com/hibiken/asynq v0.4.0
+	github.com/fatih/color v1.9.0
+	github.com/go-redis/redis/v8 v8.11.4
+	github.com/hibiken/asynq v0.21.0
+	github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d
 	github.com/mitchellh/go-homedir v1.1.0
-	github.com/rs/xid v1.2.1
-	github.com/spf13/cobra v0.0.5
-	github.com/spf13/viper v1.6.2
+	github.com/prometheus/client_golang v1.11.0
+	github.com/spf13/afero v1.1.2 // indirect
+	github.com/spf13/cobra v1.1.1
+	github.com/spf13/viper v1.7.0
 )
 
-replace github.com/hibiken/asynq => ./..
391 tools/go.sum
@@ -1,195 +1,486 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-redis/redis/v7 v7.0.0-beta.4/go.mod h1:xhhSbUMTsleRPur+Vgx9sUHtyN33bdjxY+9/0n9Ig8s=
+github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
-github.com/go-redis/redis/v7 v7.1.0 h1:I4C4a8UGbFejiVjtYVTRVOiMIJ5pm5Yru6ibvDX/OS0=
+github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
-github.com/go-redis/redis/v7 v7.1.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
+github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
-github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
-github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
|
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||||
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||||
|
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
|
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||||
|
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
|
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||||
|
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||||
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
|
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||||
|
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||||
|
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||||
|
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||||
|
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||||
|
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||||
|
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||||
|
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||||
|
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||||
|
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||||
github.com/hibiken/asynq v0.4.0 h1:NvAfYX0DRe04WgGMKRg5oX7bs6ktv2fu9YwB6O356FI=
|
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||||
github.com/hibiken/asynq v0.4.0/go.mod h1:dtrVkxCsGPVhVNHMDXAH7lFq64kbj43+G6lt4FQZfW4=
|
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||||
|
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||||
|
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||||
|
github.com/hibiken/asynq v0.19.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
|
||||||
|
github.com/hibiken/asynq v0.21.0 h1:uH9XogJhjq/S39E0/DEPWLZQ6hHJ73UiblZTe4RzHwA=
|
||||||
|
github.com/hibiken/asynq v0.21.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
|
||||||
|
github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d h1:Er+U+9PmnyRHRDQjSjRQ24HoWvOY7w9Pk7bUPYM3Ags=
|
||||||
|
github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d/go.mod h1:VmxwMfMKyb6gyv8xG0oOBMXIhquWKPx+zPtbVBd2Q1s=
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||||
|
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||||
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
|
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
|
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
|
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
|
||||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
|
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||||
|
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||||
|
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||||
|
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
|
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
|
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
|
||||||
|
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
|
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||||
|
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||||
|
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||||
|
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||||
|
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||||
|
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||||
|
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
|
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
|
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||||
|
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||||
|
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
|
||||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||||
|
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||||
|
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||||
|
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
|
||||||
|
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
|
||||||
|
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||||
|
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||||
github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4=
|
|
||||||
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
|
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||||
|
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||||
|
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||||
|
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
|
||||||
|
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||||
|
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
|
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
|
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||||
|
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
|
||||||
|
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
|
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
|
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||||
|
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||||
|
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||||
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||||
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||||
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||||
|
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
|
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
|
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
|
||||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
|
||||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
|
||||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
|
||||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
|
||||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
|
||||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
|
||||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
|
||||||
github.com/spf13/viper v1.6.0/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
|
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||||
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
|
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
|
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||||
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
|
||||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
|
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
|
||||||
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
|
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
|
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||||
|
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||||
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||||
|
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||||
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
|
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||||
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
|
||||||
|
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ=
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
|
||||||
|
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||||
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
tools/metrics_exporter/main.go (new file, 56 lines)
@@ -0,0 +1,56 @@
package main

import (
	"flag"
	"fmt"
	"log"
	"net/http"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/x/metrics"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Declare command-line flags.
// These variables are bound to flags in init().
var (
	flagRedisAddr     string
	flagRedisDB       int
	flagRedisPassword string
	flagRedisUsername string
	flagPort          int
)

func init() {
	flag.StringVar(&flagRedisAddr, "redis-addr", "127.0.0.1:6379", "host:port of redis server to connect to")
	flag.IntVar(&flagRedisDB, "redis-db", 0, "redis DB number to use")
	flag.StringVar(&flagRedisPassword, "redis-password", "", "password used to connect to redis server")
	flag.StringVar(&flagRedisUsername, "redis-username", "", "username used to connect to redis server")
	flag.IntVar(&flagPort, "port", 9876, "port to use for the HTTP server")
}

func main() {
	flag.Parse()
	// Using NewPedanticRegistry here to test the implementation of Collectors and Metrics.
	reg := prometheus.NewPedanticRegistry()

	inspector := asynq.NewInspector(asynq.RedisClientOpt{
		Addr:     flagRedisAddr,
		DB:       flagRedisDB,
		Password: flagRedisPassword,
		Username: flagRedisUsername,
	})

	reg.MustRegister(
		metrics.NewQueueMetricsCollector(inspector),
		// Add the standard process and go metrics to the registry.
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		collectors.NewGoCollector(),
	)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Printf("exporter server is listening on port: %d\n", flagPort)
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", flagPort), nil))
}
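For orientation, the exporter above serves the standard Prometheus text format on /metrics. The following standalone sketch is hypothetical and not part of the repository; it fetches that endpoint once and prints the exposition, assuming the exporter is running locally on its default port 9876.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Fetch the exporter's /metrics endpoint once and print the Prometheus
	// text exposition (lines such as asynq_queue_size{queue="default"} 3).
	resp, err := http.Get("http://localhost:9876/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}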
x/go.mod (new file, 10 lines)
@@ -0,0 +1,10 @@
module github.com/hibiken/asynq/x

go 1.16

require (
	github.com/go-redis/redis/v8 v8.11.4
	github.com/google/uuid v1.3.0
	github.com/hibiken/asynq v0.21.0
	github.com/prometheus/client_golang v1.11.0
)
x/go.sum (new file, 258 lines)
@@ -0,0 +1,258 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hibiken/asynq v0.21.0 h1:uH9XogJhjq/S39E0/DEPWLZQ6hHJ73UiblZTe4RzHwA=
github.com/hibiken/asynq v0.21.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
x/metrics/metrics.go (new file, 190 lines)
@@ -0,0 +1,190 @@
// Package metrics provides implementations of prometheus.Collector to collect Asynq queue metrics.
package metrics

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
	"github.com/prometheus/client_golang/prometheus"
)

// Namespace used in fully-qualified metrics names.
const namespace = "asynq"

// QueueMetricsCollector gathers queue metrics.
// It implements the prometheus.Collector interface.
//
// All metrics exported from this collector have prefix "asynq".
type QueueMetricsCollector struct {
	inspector *asynq.Inspector
}

// collectQueueInfo gathers QueueInfo of all queues.
// Since this operation is expensive, it must be called once per collection.
func (qmc *QueueMetricsCollector) collectQueueInfo() ([]*asynq.QueueInfo, error) {
	qnames, err := qmc.inspector.Queues()
	if err != nil {
		return nil, fmt.Errorf("failed to get queue names: %v", err)
	}
	infos := make([]*asynq.QueueInfo, len(qnames))
	for i, qname := range qnames {
		qinfo, err := qmc.inspector.GetQueueInfo(qname)
		if err != nil {
			return nil, fmt.Errorf("failed to get queue info: %v", err)
		}
		infos[i] = qinfo
	}
	return infos, nil
}

// Descriptors used by QueueMetricsCollector.
var (
	tasksQueuedDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "tasks_enqueued_total"),
		"Number of tasks enqueued; broken down by queue and state.",
		[]string{"queue", "state"}, nil,
	)

	queueSizeDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "queue_size"),
		"Number of tasks in a queue.",
		[]string{"queue"}, nil,
	)

	queueLatencyDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "queue_latency_seconds"),
		"Number of seconds the oldest pending task has been waiting to be processed.",
		[]string{"queue"}, nil,
	)

	queueMemUsgDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "queue_memory_usage_approx_bytes"),
		"Amount of memory used by a given queue (approximate value obtained by sampling).",
		[]string{"queue"}, nil,
	)

	tasksProcessedTotalDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "tasks_processed_total"),
		"Number of tasks processed (both succeeded and failed); broken down by queue.",
		[]string{"queue"}, nil,
	)

	tasksFailedTotalDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "tasks_failed_total"),
		"Number of tasks failed; broken down by queue.",
		[]string{"queue"}, nil,
	)

	pausedQueues = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "queue_paused_total"),
		"Whether the queue is paused (1 if paused, 0 otherwise).",
		[]string{"queue"}, nil,
	)
)

func (qmc *QueueMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(qmc, ch)
}

func (qmc *QueueMetricsCollector) Collect(ch chan<- prometheus.Metric) {
	queueInfos, err := qmc.collectQueueInfo()
	if err != nil {
		log.Printf("Failed to collect metrics data: %v", err)
	}
	for _, info := range queueInfos {
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Active),
			info.Queue,
			"active",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Pending),
			info.Queue,
			"pending",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Scheduled),
			info.Queue,
			"scheduled",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Retry),
			info.Queue,
			"retry",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Archived),
			info.Queue,
			"archived",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Completed),
			info.Queue,
			"completed",
		)

		ch <- prometheus.MustNewConstMetric(
			queueSizeDesc,
			prometheus.GaugeValue,
			float64(info.Size),
			info.Queue,
		)

		ch <- prometheus.MustNewConstMetric(
			queueLatencyDesc,
			prometheus.GaugeValue,
			info.Latency.Seconds(),
			info.Queue,
		)

		ch <- prometheus.MustNewConstMetric(
			queueMemUsgDesc,
			prometheus.GaugeValue,
			float64(info.MemoryUsage),
			info.Queue,
		)

		ch <- prometheus.MustNewConstMetric(
			tasksProcessedTotalDesc,
			prometheus.CounterValue,
			float64(info.ProcessedTotal),
			info.Queue,
		)

		ch <- prometheus.MustNewConstMetric(
			tasksFailedTotalDesc,
			prometheus.CounterValue,
			float64(info.FailedTotal),
			info.Queue,
		)

		pausedValue := 0 // zero to indicate "not paused"
		if info.Paused {
			pausedValue = 1
		}
		ch <- prometheus.MustNewConstMetric(
			pausedQueues,
			prometheus.GaugeValue,
			float64(pausedValue),
			info.Queue,
		)
	}
}

// NewQueueMetricsCollector returns a collector that exports metrics about Asynq queues.
func NewQueueMetricsCollector(inspector *asynq.Inspector) *QueueMetricsCollector {
	return &QueueMetricsCollector{inspector: inspector}
}
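The collector above works with any Prometheus registry. As a minimal sketch (hypothetical, not part of the repository, assuming a local Redis at 127.0.0.1:6379), it can also be registered with the default registry and served via promhttp.Handler; the metric names follow the descriptors declared in metrics.go (asynq_tasks_enqueued_total, asynq_queue_size, asynq_queue_latency_seconds, and so on).

package main

import (
	"log"
	"net/http"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/x/metrics"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})
	// Register the queue collector with the default registry; a scrape of
	// /metrics will then include the asynq_* series declared in metrics.go.
	prometheus.MustRegister(metrics.NewQueueMetricsCollector(inspector))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}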
x/rate/example_test.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package rate_test

import (
	"context"
	"fmt"
	"time"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/x/rate"
)

type RateLimitError struct {
	RetryIn time.Duration
}

func (e *RateLimitError) Error() string {
	return fmt.Sprintf("rate limited (retry in %v)", e.RetryIn)
}

func ExampleNewSemaphore() {
	redisConnOpt := asynq.RedisClientOpt{Addr: ":6379"}
	sema := rate.NewSemaphore(redisConnOpt, "my_queue", 10)
	// call sema.Close() when appropriate

	_ = asynq.HandlerFunc(func(ctx context.Context, task *asynq.Task) error {
		ok, err := sema.Acquire(ctx)
		if err != nil {
			return err
		}
		if !ok {
			return &RateLimitError{RetryIn: 30 * time.Second}
		}

		// Make sure to release the token once we're done.
		defer sema.Release(ctx)

		// Process task
		return nil
	})
}
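To make the RateLimitError from the example above actually delay the retry, it can be wired into the server configuration. The following is a hypothetical sketch that reuses the RateLimitError type defined in example_test.go; it assumes an asynq version whose Config exposes the RetryDelayFunc and IsFailure hooks (verify against your version's API before relying on this).

package rate_test

import (
	"errors"
	"time"

	"github.com/hibiken/asynq"
)

// newRateLimitAwareServer is a hypothetical constructor showing one way to
// honor RateLimitError.RetryIn when a handler reports rate limiting.
func newRateLimitAwareServer(redisConnOpt asynq.RedisConnOpt) *asynq.Server {
	return asynq.NewServer(redisConnOpt, asynq.Config{
		// Use the error's RetryIn value as the delay before the next attempt.
		RetryDelayFunc: func(n int, err error, task *asynq.Task) time.Duration {
			var rateLimitErr *RateLimitError
			if errors.As(err, &rateLimitErr) {
				return rateLimitErr.RetryIn
			}
			return asynq.DefaultRetryDelayFunc(n, err, task)
		},
		// Do not count rate-limited runs as failures against the retry limit.
		IsFailure: func(err error) bool {
			var rateLimitErr *RateLimitError
			return !errors.As(err, &rateLimitErr)
		},
	})
}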
x/rate/semaphore.go (new file, 114 lines)
@@ -0,0 +1,114 @@
// Package rate contains rate limiting strategies for asynq.Handler(s).
package rate

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/hibiken/asynq"
	asynqcontext "github.com/hibiken/asynq/internal/context"
)

// NewSemaphore creates a counting Semaphore for the given scope with the given number of tokens.
func NewSemaphore(rco asynq.RedisConnOpt, scope string, maxTokens int) *Semaphore {
	rc, ok := rco.MakeRedisClient().(redis.UniversalClient)
	if !ok {
		panic(fmt.Sprintf("rate.NewSemaphore: unsupported RedisConnOpt type %T", rco))
	}

	if maxTokens < 1 {
		panic("rate.NewSemaphore: maxTokens cannot be less than 1")
	}

	if len(strings.TrimSpace(scope)) == 0 {
		panic("rate.NewSemaphore: scope should not be empty")
	}

	return &Semaphore{
		rc:        rc,
		scope:     scope,
		maxTokens: maxTokens,
	}
}

// Semaphore is a distributed counting semaphore which can be used to set maxTokens across multiple asynq servers.
type Semaphore struct {
	rc        redis.UniversalClient
	maxTokens int
	scope     string
}

// KEYS[1] -> asynq:sema:<scope>
// ARGV[1] -> max concurrency
// ARGV[2] -> current time in unix time
// ARGV[3] -> deadline in unix time
// ARGV[4] -> task ID
var acquireCmd = redis.NewScript(`
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", tonumber(ARGV[2])-1)
local count = redis.call("ZCARD", KEYS[1])

if (count < tonumber(ARGV[1])) then
	redis.call("ZADD", KEYS[1], ARGV[3], ARGV[4])
	return 'true'
else
	return 'false'
end
`)

// Acquire attempts to acquire a token from the semaphore.
//   - Returns (true, nil) if a token was acquired, i.e. the current token count is less than maxTokens.
//   - Returns (false, nil) when a token cannot be acquired.
//   - Returns (false, error) otherwise.
//
// The context.Context passed to Acquire must have a deadline set;
// this ensures that the token is released even if the job goroutine crashes and never calls Release.
func (s *Semaphore) Acquire(ctx context.Context) (bool, error) {
	d, ok := ctx.Deadline()
	if !ok {
		return false, fmt.Errorf("provided context must have a deadline")
	}

	taskID, ok := asynqcontext.GetTaskID(ctx)
	if !ok {
		return false, fmt.Errorf("provided context is missing task ID value")
	}

	return acquireCmd.Run(ctx, s.rc,
		[]string{semaphoreKey(s.scope)},
		s.maxTokens,
		time.Now().Unix(),
		d.Unix(),
		taskID,
	).Bool()
}

// Release will release the token on the counting semaphore.
func (s *Semaphore) Release(ctx context.Context) error {
	taskID, ok := asynqcontext.GetTaskID(ctx)
	if !ok {
		return fmt.Errorf("provided context is missing task ID value")
	}

	n, err := s.rc.ZRem(ctx, semaphoreKey(s.scope), taskID).Result()
	if err != nil {
		return fmt.Errorf("redis command failed: %v", err)
	}

	if n == 0 {
		return fmt.Errorf("no token found for task %q", taskID)
	}

	return nil
}

// Close closes the connection to redis.
func (s *Semaphore) Close() error {
	return s.rc.Close()
}

func semaphoreKey(scope string) string {
	return fmt.Sprintf("asynq:sema:%s", scope)
}
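A design note on the file above: each token is a member of the sorted set asynq:sema:<scope>, scored by the holder's context deadline. Every Acquire first prunes members whose score is in the past, so tokens held by crashed workers are reclaimed automatically; the stale-token test in the file below exercises exactly this. The following helper is hypothetical, not part of x/rate, and simply counts the set's members to report current usage under those assumptions (the count may include tokens whose deadline has passed but which have not yet been pruned).

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// tokensInUse reports how many tokens are currently held for a scope by
// counting members of the sorted set that Semaphore maintains. The key
// follows the "asynq:sema:<scope>" convention used by semaphoreKey above.
func tokensInUse(ctx context.Context, rc redis.UniversalClient, scope string) (int64, error) {
	return rc.ZCard(ctx, fmt.Sprintf("asynq:sema:%s", scope)).Result()
}

func main() {
	rc := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rc.Close()
	n, err := tokensInUse(context.Background(), rc, "my_queue")
	if err != nil {
		panic(err)
	}
	fmt.Printf("tokens in use for my_queue: %d\n", n)
}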
x/rate/semaphore_test.go (new file, 408 lines)
@@ -0,0 +1,408 @@
package rate
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-redis/redis/v8"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/hibiken/asynq"
|
||||||
|
"github.com/hibiken/asynq/internal/base"
|
||||||
|
asynqcontext "github.com/hibiken/asynq/internal/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
redisAddr string
|
||||||
|
redisDB int
|
||||||
|
|
||||||
|
useRedisCluster bool
|
||||||
|
redisClusterAddrs string // comma-separated list of host:port
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing")
|
||||||
|
flag.IntVar(&redisDB, "redis_db", 14, "redis db number to use in testing")
|
||||||
|
flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing")
|
||||||
|
flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewSemaphore(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
desc string
|
||||||
|
name string
|
||||||
|
maxConcurrency int
|
||||||
|
wantPanic string
|
||||||
|
connOpt asynq.RedisConnOpt
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
desc: "Bad RedisConnOpt",
|
||||||
|
wantPanic: "rate.NewSemaphore: unsupported RedisConnOpt type *rate.badConnOpt",
|
||||||
|
connOpt: &badConnOpt{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "Zero maxTokens should panic",
|
||||||
|
wantPanic: "rate.NewSemaphore: maxTokens cannot be less than 1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "Empty scope should panic",
|
||||||
|
maxConcurrency: 2,
|
||||||
|
name: " ",
|
||||||
|
wantPanic: "rate.NewSemaphore: scope should not be empty",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.desc, func(t *testing.T) {
|
||||||
|
if tt.wantPanic != "" {
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r.(string) != tt.wantPanic {
|
||||||
|
t.Errorf("%s;\nNewSemaphore should panic with msg: %s, got %s", tt.desc, tt.wantPanic, r.(string))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
opt := tt.connOpt
|
||||||
|
if tt.connOpt == nil {
|
||||||
|
opt = getRedisConnOpt(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
|
||||||
|
defer sema.Close()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewSemaphore_Acquire(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
desc string
|
||||||
|
name string
|
||||||
|
maxConcurrency int
|
||||||
|
taskIDs []string
|
||||||
|
ctxFunc func(string) (context.Context, context.CancelFunc)
|
||||||
|
want []bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
desc: "Should acquire token when current token count is less than maxTokens",
|
||||||
|
name: "task-1",
|
||||||
|
maxConcurrency: 3,
|
||||||
|
taskIDs: []string{uuid.NewString(), uuid.NewString()},
|
||||||
|
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
|
||||||
|
return asynqcontext.New(&base.TaskMessage{
|
||||||
|
ID: id,
|
||||||
|
Queue: "task-1",
|
||||||
|
}, time.Now().Add(time.Second))
|
||||||
|
},
|
||||||
|
want: []bool{true, true},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "Should fail acquiring token when current token count is equal to maxTokens",
|
||||||
|
name: "task-2",
|
||||||
|
maxConcurrency: 3,
|
||||||
|
taskIDs: []string{uuid.NewString(), uuid.NewString(), uuid.NewString(), uuid.NewString()},
|
||||||
|
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
|
||||||
|
return asynqcontext.New(&base.TaskMessage{
|
||||||
|
ID: id,
|
||||||
|
Queue: "task-2",
|
||||||
|
}, time.Now().Add(time.Second))
|
||||||
|
},
|
||||||
|
want: []bool{true, true, true, false},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.desc, func(t *testing.T) {
|
||||||
|
opt := getRedisConnOpt(t)
|
||||||
|
rc := opt.MakeRedisClient().(redis.UniversalClient)
|
||||||
|
defer rc.Close()
|
||||||
|
|
||||||
|
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
|
||||||
|
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
|
||||||
|
defer sema.Close()
|
||||||
|
|
||||||
|
for i := 0; i < len(tt.taskIDs); i++ {
|
||||||
|
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
|
||||||
|
|
||||||
|
got, err := sema.Acquire(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s;\nSemaphore.Acquire() got error %v", tt.desc, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if got != tt.want[i] {
|
||||||
|
t.Errorf("%s;\nSemaphore.Acquire(ctx) returned %v, want %v", tt.desc, got, tt.want[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewSemaphore_Acquire_Error(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
desc string
|
||||||
|
name string
|
||||||
|
maxConcurrency int
|
||||||
|
taskIDs []string
|
||||||
|
ctxFunc func(string) (context.Context, context.CancelFunc)
|
||||||
|
errStr string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
desc: "Should return error if context has no deadline",
|
||||||
|
name: "task-3",
|
||||||
|
maxConcurrency: 1,
|
||||||
|
taskIDs: []string{uuid.NewString(), uuid.NewString()},
|
||||||
|
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
|
||||||
|
return context.Background(), func() {}
|
||||||
|
},
|
||||||
|
errStr: "provided context must have a deadline",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "Should return error when context is missing taskID",
|
||||||
|
name: "task-4",
|
||||||
|
maxConcurrency: 1,
|
||||||
|
taskIDs: []string{uuid.NewString()},
|
||||||
|
ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
|
||||||
|
return context.WithTimeout(context.Background(), time.Second)
|
||||||
|
},
|
||||||
|
errStr: "provided context is missing task ID value",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.desc, func(t *testing.T) {
|
||||||
|
opt := getRedisConnOpt(t)
|
||||||
|
rc := opt.MakeRedisClient().(redis.UniversalClient)
|
||||||
|
defer rc.Close()
|
||||||
|
|
||||||
|
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
|
||||||
|
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
|
||||||
|
defer sema.Close()
|
||||||
|
|
||||||
|
for i := 0; i < len(tt.taskIDs); i++ {
|
||||||
|
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
|
||||||
|
|
||||||
|
_, err := sema.Acquire(ctx)
|
||||||
|
if err == nil || err.Error() != tt.errStr {
|
||||||
|
t.Errorf("%s;\nSemaphore.Acquire() got error %v want error %v", tt.desc, err, tt.errStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewSemaphore_Acquire_StaleToken(t *testing.T) {
	opt := getRedisConnOpt(t)
	rc := opt.MakeRedisClient().(redis.UniversalClient)
	defer rc.Close()

	taskID := uuid.NewString()

	// Add a set member to mimic the case where a token was acquired but the
	// goroutine crashed; the token was never explicitly removed, so it is
	// already present when Acquire runs.
	rc.ZAdd(context.Background(), semaphoreKey("stale-token"), &redis.Z{
		Score:  float64(time.Now().Add(-10 * time.Second).Unix()),
		Member: taskID,
	})

	sema := NewSemaphore(opt, "stale-token", 1)
	defer sema.Close()

	ctx, cancel := asynqcontext.New(&base.TaskMessage{
		ID:    taskID,
		Queue: "task-1",
	}, time.Now().Add(time.Second))
	defer cancel()

	got, err := sema.Acquire(ctx)
	if err != nil {
		t.Errorf("Acquire_StaleToken;\nSemaphore.Acquire() got error %v", err)
	}

	if !got {
		t.Error("Acquire_StaleToken;\nSemaphore.Acquire() got false want true")
	}
}

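// Illustrative sketch (not part of the test suite): one way a handler might
// pair Acquire and Release around task processing. The handler wiring and the
// processTask helper below are hypothetical; NewSemaphore, Acquire, Release,
// and Close are the package API as exercised by these tests.
//
//	sema := NewSemaphore(redisConnOpt, "my-scope", 10)
//	defer sema.Close()
//
//	handler := asynq.HandlerFunc(func(ctx context.Context, task *asynq.Task) error {
//		// ctx must carry a deadline and the asynq task ID (see the error cases above).
//		ok, err := sema.Acquire(ctx)
//		if err != nil {
//			return err
//		}
//		if !ok {
//			return errors.New("rate limited; returning an error lets asynq retry later")
//		}
//		defer sema.Release(ctx)
//		return processTask(ctx, task)
//	})
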
func TestNewSemaphore_Release(t *testing.T) {
	tests := []struct {
		desc      string
		name      string
		taskIDs   []string
		ctxFunc   func(string) (context.Context, context.CancelFunc)
		wantCount int64
	}{
		{
			desc:    "Should decrease token count",
			name:    "task-5",
			taskIDs: []string{uuid.NewString()},
			ctxFunc: func(id string) (context.Context, context.CancelFunc) {
				return asynqcontext.New(&base.TaskMessage{
					ID:    id,
					Queue: "task-3",
				}, time.Now().Add(time.Second))
			},
		},
		{
			desc:    "Should decrease token count by 2",
			name:    "task-6",
			taskIDs: []string{uuid.NewString(), uuid.NewString()},
			ctxFunc: func(id string) (context.Context, context.CancelFunc) {
				return asynqcontext.New(&base.TaskMessage{
					ID:    id,
					Queue: "task-4",
				}, time.Now().Add(time.Second))
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			opt := getRedisConnOpt(t)
			rc := opt.MakeRedisClient().(redis.UniversalClient)
			defer rc.Close()

			if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
			}

			var members []*redis.Z
			for i := 0; i < len(tt.taskIDs); i++ {
				members = append(members, &redis.Z{
					Score:  float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
					Member: tt.taskIDs[i],
				})
			}
			if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.ZAdd() got error %v", tt.desc, err)
			}

			sema := NewSemaphore(opt, tt.name, 3)
			defer sema.Close()

			for i := 0; i < len(tt.taskIDs); i++ {
				ctx, cancel := tt.ctxFunc(tt.taskIDs[i])

				if err := sema.Release(ctx); err != nil {
					t.Errorf("%s;\nSemaphore.Release() got error %v", tt.desc, err)
				}

				cancel()
			}

			i, err := rc.ZCount(context.Background(), semaphoreKey(tt.name), "-inf", "+inf").Result()
			if err != nil {
				t.Errorf("%s;\nredis.UniversalClient.ZCount() got error %v", tt.desc, err)
			}

			if i != tt.wantCount {
				t.Errorf("%s;\nSemaphore.Release(ctx) didn't release token, got %v want %v", tt.desc, i, tt.wantCount)
			}
		})
	}
}

func TestNewSemaphore_Release_Error(t *testing.T) {
	testID := uuid.NewString()

	tests := []struct {
		desc    string
		name    string
		taskIDs []string
		ctxFunc func(string) (context.Context, context.CancelFunc)
		errStr  string
	}{
		{
			desc:    "Should return error when context is missing taskID",
			name:    "task-7",
			taskIDs: []string{uuid.NewString()},
			ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
				return context.WithTimeout(context.Background(), time.Second)
			},
			errStr: "provided context is missing task ID value",
		},
		{
			desc:    "Should return error when context has taskID which never acquired token",
			name:    "task-8",
			taskIDs: []string{uuid.NewString()},
			ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
				return asynqcontext.New(&base.TaskMessage{
					ID:    testID,
					Queue: "task-4",
				}, time.Now().Add(time.Second))
			},
			errStr: fmt.Sprintf("no token found for task %q", testID),
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			opt := getRedisConnOpt(t)
			rc := opt.MakeRedisClient().(redis.UniversalClient)
			defer rc.Close()

			if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
			}

			var members []*redis.Z
			for i := 0; i < len(tt.taskIDs); i++ {
				members = append(members, &redis.Z{
					Score:  float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
					Member: tt.taskIDs[i],
				})
			}
			if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {
				t.Errorf("%s;\nredis.UniversalClient.ZAdd() got error %v", tt.desc, err)
			}

			sema := NewSemaphore(opt, tt.name, 3)
			defer sema.Close()

			for i := 0; i < len(tt.taskIDs); i++ {
				ctx, cancel := tt.ctxFunc(tt.taskIDs[i])

				if err := sema.Release(ctx); err == nil || err.Error() != tt.errStr {
					t.Errorf("%s;\nSemaphore.Release() got error %v want error %v", tt.desc, err, tt.errStr)
				}

				cancel()
			}
		})
	}
}

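// Note on the Redis layout exercised above: each semaphore is a sorted set
// stored under semaphoreKey(name). Members are task IDs and scores are the
// tasks' deadlines in unix seconds, so a member whose score is in the past is
// a stale token (see TestNewSemaphore_Acquire_StaleToken). Seeding a token by
// hand, as the Release tests do, is equivalent to:
//
//	ZADD <semaphoreKey(name)> <deadline-unix-seconds> <task-id>
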
func getRedisConnOpt(tb testing.TB) asynq.RedisConnOpt {
	tb.Helper()
	if useRedisCluster {
		// strings.Split always returns at least one element, so an empty
		// flag value shows up as a single empty string rather than an
		// empty slice; check for both.
		addrs := strings.Split(redisClusterAddrs, ",")
		if len(addrs) == 0 || addrs[0] == "" {
			tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
		}
		return asynq.RedisClusterClientOpt{
			Addrs: addrs,
		}
	}
	return asynq.RedisClientOpt{
		Addr: redisAddr,
		DB:   redisDB,
	}
}

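// The flags read by getRedisConnOpt (redisAddr, redisDB, useRedisCluster,
// redisClusterAddrs) are defined elsewhere in this package. As a hypothetical
// example, a cluster run might pass:
//
//	go test --redis_cluster_addrs=localhost:7000,localhost:7001,localhost:7002 ...
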
// badConnOpt is a stub RedisConnOpt whose MakeRedisClient returns nil instead
// of a redis client, for exercising failure paths in tests.
type badConnOpt struct{}

func (b badConnOpt) MakeRedisClient() interface{} {
	return nil
}