Compare commits


173 Commits

Author SHA1 Message Date
Ken Hibino
c04fd41653 v0.22.1 2022-02-20 06:22:55 -08:00
Ken Hibino
7e5efb0e30 Drop GT option from RDB.ExtendLease
GT option in ZAdd is supported in Redis v6.2.0 or above.
This change fixes Redis version compatibility (currently v4.0+)
2022-02-20 06:20:38 -08:00
Ken Hibino
cabf8d3627 Fix changelog 2022-02-19 06:21:56 -08:00
Ken Hibino
a19909f5f4 v0.22.0 2022-02-19 06:20:05 -08:00
Ken Hibino
cea5110d15 Add IsOrphaned field to TaskInfo 2022-02-19 06:15:44 -08:00
Ken Hibino
9b63e23274 Update log messages 2022-02-19 06:15:44 -08:00
Ken Hibino
de25201d9f Make timeutil.SimulatedClock concurrency safe 2022-02-19 06:15:44 -08:00
Ken Hibino
ec560afb01 Fix processor test 2022-02-19 06:15:44 -08:00
Ken Hibino
d4006894ad Remove base.DeadlinesKey 2022-02-19 06:15:44 -08:00
Ken Hibino
59927509d8 Remove timeout and deadline fields under task key 2022-02-19 06:15:44 -08:00
Ken Hibino
8211167de2 Update processor to create a lease and watch for expiration 2022-02-19 06:15:44 -08:00
Ken Hibino
d7169cd445 Update heartbeat to extend lease of active workers 2022-02-19 06:15:44 -08:00
Ken Hibino
dfae8638e1 Update RDB methods to work with lease 2022-02-19 06:15:44 -08:00
Ken Hibino
b9943de2ab Add Lease type to base package 2022-02-19 06:15:44 -08:00
Ken Hibino
871474f220 Update heartbeat goroutine to call ExtendLease on active tasks 2022-02-19 06:15:44 -08:00
Ken Hibino
87dc392c7f Add RDB.ExtendLease method 2022-02-19 06:15:44 -08:00
Ken Hibino
dabcb120d5 Update recoverer to use ListLeaseExpired 2022-02-19 06:15:44 -08:00
Ken Hibino
bc2f1986d7 Update ListDeadlineExceeded to ListLeaseExpired 2022-02-19 06:15:44 -08:00
Ken Hibino
b8cb579407 Update RDB methods to use lease instead of deadlines set 2022-02-19 06:15:44 -08:00
Ken Hibino
bca624792c Move task deadline compute logic to processor 2022-02-19 06:15:44 -08:00
Ken Hibino
d865d89900 Update RDB.Dequeue to insert task ID to lease set 2022-02-19 06:15:44 -08:00
Ken Hibino
852af7abd1 Add base.LeaseKey helper function 2022-02-19 06:15:44 -08:00
Ken Hibino
5490d2c625 Fix tests 2022-02-16 07:08:01 -08:00
Binaek Sarkar
ebd7a32c0f conventions 2022-02-16 06:43:08 -08:00
Binaek Sarkar
55d0610a03 test and changelog 2022-02-16 06:43:08 -08:00
Binaek Sarkar
ab8a4f5b1e review corrections 2022-02-16 06:43:08 -08:00
Binaek Sarkar
d7ceb0c090 first cut 2022-02-16 06:43:08 -08:00
Ken Hibino
8bd70c6f84 (ci): Run go (build|test) commands for each module 2022-02-01 07:00:00 -08:00
Ken Hibino
10ab4e3745 Remove replace directives in go.mod 2022-02-01 06:18:41 -08:00
Ken Hibino
349f4c50fb Add example for ResultWriter 2022-01-31 09:08:41 -08:00
Ken Hibino
dff2e3a336 v0.21.0 2022-01-22 06:15:29 -08:00
Ken Hibino
65040af7b5 Update changelog 2022-01-22 06:14:24 -08:00
Ken Hibino
053fe2d1ee Create PeriodicTaskManager 2022-01-22 05:59:33 -08:00
Ken Hibino
25832e5e95 Fix bug related to concurrently executing server state changes 2022-01-12 09:10:56 -08:00
Ken Hibino
aa26f3819e Fix flaky tests 2022-01-05 09:07:42 -08:00
Ken Hibino
d94614bb9b Add CODE_OF_CONDUCT.md 2022-01-04 06:17:48 -08:00
Mahdi Dibaiee
ce46b07652 Allow configuration of DelayedTaskCheckInterval 2022-01-03 14:44:00 -08:00
Mahdi Dibaiee
2d0170541c Add --json flag for asynq stats command 2022-01-02 07:24:29 -08:00
Andreas Thomas
c1f08106da fix: missing import statement in example code 2021-12-27 05:40:10 -08:00
Ken Hibino
74cf804197 Update readme 2021-12-20 05:51:51 -08:00
Ken Hibino
8dfabfccb3 Fix build 2021-12-19 07:06:37 -08:00
Ken Hibino
5f20edcbd1 v0.20.0 2021-12-19 07:00:21 -08:00
Ken Hibino
1ddb2f7bce Use math.MaxInt64 instead of custom const 2021-12-19 06:58:12 -08:00
Ken Hibino
82d18e3d91 Record total tasks processed/failed 2021-12-16 16:53:02 -08:00
Ken Hibino
43cb4ddf19 Add queue metrics exporter
Changes:
- Added `x/metrics` package
- Added `tools/metrics_exporter` binary
2021-12-16 06:01:01 -08:00
Francisco Miamoto
ddfc6747a1 Fix typo in Server doc 2021-12-13 16:23:30 -08:00
Ken Hibino
970cb7a606 v0.19.1 2021-12-12 06:16:13 -08:00
Ken Hibino
157e97e72e Update changelog 2021-12-11 10:29:43 -08:00
Ken Hibino
22e6c9d297 Delete "pending_since" under task-key when state changes to active 2021-12-11 10:29:43 -08:00
Ken Hibino
99a6750656 Add Latency field to QueueInfo 2021-12-11 10:29:43 -08:00
Ken Hibino
e7c1c3ad6f Use clock in RDB 2021-12-11 10:29:43 -08:00
Ken Hibino
c9183374c5 Add internal timeutil package 2021-12-11 10:29:43 -08:00
Ken Hibino
6e7106c8f2 Record time when task moved to pending state 2021-12-11 10:29:43 -08:00
Ken Hibino
9f2c321e98 Add EnqueueContext method to Client 2021-11-15 16:34:26 -08:00
Ken Hibino
e2b61c9056 Return error if Unique TTL is less than 1s 2021-11-09 16:37:02 -08:00
Ken Hibino
531d1ef089 Fix godoc around errors returned from Inspector 2021-11-09 15:45:20 -08:00
Ken Hibino
413afc2ab6 v0.19.0 2021-11-06 15:20:09 -07:00
Ken Hibino
6bb4818509 Update readme 2021-11-06 15:18:42 -07:00
Ken Hibino
f4ddac4dcc Introduce Task Results
* Added Retention Option to specify retention TTL for tasks
* Added ResultWriter as a client interface to write result data for the associated task
2021-11-06 15:18:42 -07:00
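As a hedged illustration of the `Retention` option this commit introduces, here is a minimal enqueue sketch; the task type name, payload, and TTL are made up for the example and are not taken from the repository:

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})
	defer client.Close()

	// "email:welcome" and the nil payload are placeholders for illustration.
	task := asynq.NewTask("email:welcome", nil)

	// Retention keeps the completed task (and any result data written for it)
	// around for 24 hours so it can still be inspected.
	info, err := client.Enqueue(task, asynq.Retention(24*time.Hour))
	if err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
}
```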
Ken Hibino
4638405cbd Fix flaky test 2021-11-06 15:18:42 -07:00
Ken Hibino
9e2f88c00d Add TaskID option to allow user to specify task id 2021-11-06 15:18:42 -07:00
Ken Hibino
dbdd9c6d5f Update RDB Enqueue and Schedule methods to check for task ID conflict 2021-11-06 15:18:42 -07:00
Ken Hibino
2261c7c9a0 Change TaskMessage.ID type from uuid.UUID to string 2021-11-06 15:18:42 -07:00
Ken Hibino
83cae4bb24 Update NewTask function to take Option as varargs 2021-11-06 15:18:42 -07:00
Ajat Prabha
23c522dc9f Add asynq/x/rate package
- Added a directory /x for external, experimental packages
- Added a `rate` package to enable rate limiting across multiple asynq worker servers
2021-11-03 15:55:23 -07:00
Ken Hibino
0d2c0f612b Add FUNDING.yml 2021-10-03 09:25:35 -07:00
Ken Hibino
d612a8a9e4 v0.18.6 2021-10-03 05:55:49 -07:00
Jason White
b3ef9e91a9 Upgrade go-redis/redis to version 8 2021-09-02 05:56:02 -07:00
Ken Hibino
05534c6f24 v0.18.5 2021-09-01 06:02:49 -07:00
Ken Hibino
f0db219f6a Add IsFailure to Config
With this IsFailure config, users can provide a predicate function to 
determine whether the error returned from Handler counts as a failure.
2021-09-01 06:00:54 -07:00
Mehran Poursadeghi
3ae0e7f528 Fix readme 2021-08-27 14:47:03 -07:00
Ken Hibino
421dc584ff v0.18.4 2021-08-17 17:12:33 -07:00
Ken Hibino
cfd1a1dfe8 Make scheduler methods thread-safe 2021-08-17 17:10:53 -07:00
Ken Hibino
c197902dc0 v0.18.3 2021-08-09 08:59:35 -07:00
Ken Hibino
e6355bf3f5 Use approximate memory usage for QueueInfo 2021-08-09 08:58:44 -07:00
Luqqk
95c90a5cb8 Add changelog entry, add additional test case 2021-08-02 20:20:09 -07:00
Luqqk
6817af366a Adjust error message, use TrimSpace for more robust empty typename check 2021-08-02 20:20:09 -07:00
Luqqk
4bce28d677 client.Enqueue - prevent empty task's typename 2021-08-02 20:20:09 -07:00
Pedro Henrique
73f930313c Fixes links 2021-07-29 17:15:27 -07:00
Ken Hibino
bff2a05d59 Fix examples in readme 2021-07-18 09:28:43 -07:00
Ken Hibino
684a7e0c98 v0.18.2 2021-07-15 06:56:53 -07:00
Ken Hibino
46b23d6495 Allow upper case characters in queue name 2021-07-15 06:55:47 -07:00
Ken Hibino
c0ae62499f v0.18.1 2021-07-04 06:39:54 -07:00
Ken Hibino
7744ade362 Update changelog 2021-07-04 06:38:36 -07:00
Ken Hibino
f532c95394 Update recoverer to recover tasks on server startup 2021-07-04 06:38:36 -07:00
Ken Hibino
ff6768f9bb Fix recoverer to run task recovering logic every minute 2021-07-04 06:38:36 -07:00
Ken Hibino
d5e9f3b1bd Update readme 2021-06-30 06:26:14 -07:00
Ken Hibino
d02b722d8a v0.18.0 2021-06-29 16:36:52 -07:00
Ken Hibino
99c7ebeef2 Add migration command in CLI 2021-06-29 16:34:21 -07:00
Ken Hibino
bf54621196 Update example code in README 2021-06-29 16:34:21 -07:00
Ken Hibino
27baf6de0d Fix error in readme 2021-06-29 16:34:21 -07:00
Ken Hibino
1bd0bee1e5 Fix CLI build 2021-06-29 16:34:21 -07:00
Ken Hibino
a9feec5967 Change TaskInfo to use public fields instead of methods 2021-06-29 16:34:21 -07:00
Ken Hibino
e01c6379c8 Fix lua script for redis-cluster mode 2021-06-29 16:34:21 -07:00
Ken Hibino
a0df047f71 Use md5 to generate checksum for unique key 2021-06-29 16:34:21 -07:00
Ken Hibino
68dd6d9a9d (fix): Clear unique lock when task is deleted via Inspector 2021-06-29 16:34:21 -07:00
Ken Hibino
6cce31a134 Fix recoverer test 2021-06-29 16:34:21 -07:00
Ken Hibino
f9d7af3def Update ProcessorRetry test 2021-06-29 16:34:21 -07:00
Ken Hibino
b0321fb465 Format payload bytes in CLI output 2021-06-29 16:34:21 -07:00
Ken Hibino
7776c7ae53 Rename cli subcommand to not use dash 2021-06-29 16:34:21 -07:00
Ken Hibino
709ca79a2b Add task inspect command 2021-06-29 16:34:21 -07:00
Ken Hibino
08d8f0b37c Add String method to TaskState 2021-06-29 16:34:21 -07:00
Ken Hibino
385323b679 Minor fix in queue command 2021-06-29 16:34:21 -07:00
Ken Hibino
77604af265 Fix asynq CLI build 2021-06-29 16:34:21 -07:00
Ken Hibino
4765742e8a Add Inspector.GetTaskInfo 2021-06-29 16:34:21 -07:00
Ken Hibino
68839dc9d3 Fix lua scripts for redis cluster 2021-06-29 16:34:21 -07:00
Ken Hibino
8922d2423a Define RDB.GetTaskInfo 2021-06-29 16:34:21 -07:00
Ken Hibino
b358de907e Rename Inspector.CurrentStats to GetQueueInfo 2021-06-29 16:34:21 -07:00
Ken Hibino
8ee1825e67 Rename Inspector.CancelActiveTask to CancelProcessing 2021-06-29 16:34:21 -07:00
Ken Hibino
c8bda26bed Make NodeCluster fields read-only 2021-06-29 16:34:21 -07:00
Ken Hibino
8aeeb61c9d Misc cleanup 2021-06-29 16:34:21 -07:00
Ken Hibino
96c51fdc23 Update WorkerInfo and remove unnecessary types 2021-06-29 16:34:21 -07:00
Ken Hibino
ea9086fd8b Update Inspector.List*Task methods to return ErrQueueNotFound 2021-06-29 16:34:21 -07:00
Ken Hibino
e63d51da0c Update Inspector.ListArchivedTasks 2021-06-29 16:34:21 -07:00
Ken Hibino
cd351d49b9 Add LastFailedAt to TaskInfo 2021-06-29 16:34:21 -07:00
Ken Hibino
87264b66f3 Record last_failed_at time on Retry or Archive event 2021-06-29 16:34:21 -07:00
Ken Hibino
62168b8d0d Add LastFailedAt field to TaskMessage 2021-06-29 16:34:21 -07:00
Ken Hibino
840f7245b1 Update List methods (except for ListArchived) 2021-06-29 16:34:21 -07:00
Ken Hibino
12f4c7cf6e Move inspeq package content to asynq package 2021-06-29 16:34:21 -07:00
Ken Hibino
0ec3b55e6b Replace ArchiveTaskByKey with ArchiveTask in Inspector 2021-06-29 16:34:21 -07:00
Ken Hibino
4bcc5ab6aa Replace DeleteTaskByKey with DeleteTask in Inspector 2021-06-29 16:34:21 -07:00
Ken Hibino
456edb6b71 Replace RunTaskByKey with RunTask in Inspector 2021-06-29 16:34:21 -07:00
Ken Hibino
b835090ad8 Update Client.Enqueue to return TaskInfo 2021-06-29 16:34:21 -07:00
Ken Hibino
09cbea66f6 Define TaskInfo type 2021-06-29 16:34:21 -07:00
Ken Hibino
b9c2572203 Refactor redis keys and store messages in protobuf
Changes:
- Task messages are stored under the "asynq:{<qname>}:t:<task_id>" key in Redis; the value is a HASH whose "msg" field holds the message. The hash also stores "deadline" and "timeout".
- Redis LIST and ZSET store task message IDs
- Task messages are serialized using protocol buffer
2021-06-29 16:34:21 -07:00
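The commit message above spells out the new Redis key layout; as a rough, hedged sketch of what that layout looks like from a Redis client's point of view, the snippet below reads one such hash with go-redis v8 (the queue name and task ID are placeholders, not values from the repository):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})

	// Task messages live in a HASH under asynq:{<qname>}:t:<task_id>; the
	// "msg" field holds the protobuf-encoded message, and "timeout"/"deadline"
	// are stored as separate fields. The key below is illustrative only.
	key := "asynq:{default}:t:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
	fields, err := rdb.HGetAll(ctx, key).Result()
	if err != nil {
		panic(err)
	}
	for field, value := range fields {
		fmt.Printf("%s => %d bytes\n", field, len(value))
	}
}
```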
Ken Hibino
0bf767cf21 Add TaskState type to base package 2021-06-29 16:34:21 -07:00
Ken Hibino
1812d05d21 Fix build 2021-06-29 16:34:21 -07:00
Ken Hibino
4af65d5fa5 Update RDB methods with new errors package 2021-06-29 16:34:21 -07:00
Ken Hibino
a19ad19382 Update RDB.Dequeue with new errors package 2021-06-29 16:34:21 -07:00
Ken Hibino
8117ce8972 Minor fixes 2021-06-29 16:34:21 -07:00
Ken Hibino
d98ecdebb4 Update RDB.EnqueueUnique and RDB.ScheduleUnique with specific errors 2021-06-29 16:34:21 -07:00
Ken Hibino
ffe9aa74b3 Add errors.RedisCommandError type 2021-06-29 16:34:21 -07:00
Ken Hibino
d2d4029aba Update RDB.CurrentStats and RDB.HistoricalStats with specific errors 2021-06-29 16:34:21 -07:00
Ken Hibino
76bd865ebc Update RDB.RemoveQueue with specific error types 2021-06-29 16:34:21 -07:00
Ken Hibino
136d1c9ea9 Update rdb.List* methods with specific errors 2021-06-29 16:34:21 -07:00
Ken Hibino
52e04355d3 Return QueueNotFoundError from DeleteAll* methods 2021-06-29 16:34:21 -07:00
Ken Hibino
cde3e57c6c Update RDB.RunAll* methods with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
dd66acef1b Return QueueNotFoundError from ArchiveAll* methods 2021-06-29 16:34:21 -07:00
Ken Hibino
30a3d9641a Update tests for RDB.DeleteTask and RDB.ArchiveTask 2021-06-29 16:34:21 -07:00
Ken Hibino
961582cba6 Update RDB.RunTask with more specific errors 2021-06-29 16:34:21 -07:00
Ken Hibino
430dbb298e Update RDB.DeleteTask with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
675826be5f Update RDB.ArchiveAll methods with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
62f4e46b73 Update RDB.ArchiveAllPendingTasks with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
a500f8a534 Reorganize test for RDB.ArchiveTask 2021-06-29 16:34:21 -07:00
Ken Hibino
bcfeff38ed Update errors package with detailed comments 2021-06-29 16:34:21 -07:00
Ken Hibino
12a90f6a8d Update RDB.ArchiveTask with custom errors 2021-06-29 16:34:21 -07:00
Ken Hibino
807624e7dd Create internal errors package 2021-06-29 16:34:21 -07:00
Ken Hibino
4d65024bd7 Update rdb.ArchiveTask with more specific error types 2021-06-29 16:34:21 -07:00
Ken Hibino
76486b5cb4 Rename error types 2021-06-29 16:34:21 -07:00
Ken Hibino
1db516c53c Add a list of canonical errors in base package 2021-06-29 16:34:21 -07:00
Ken Hibino
cb5bdf245c Update RDB.ArchiveTask with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
267493ccef Update RDB.RunTask with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
5d7f1b6a80 Update RDB.Requeue with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
77ded502ab Update RDB.Retry, RDB.Archive with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
f2284be43d Update RDB.Dequeue with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
3cadab55cb Update RDB.ForwardIfReady with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
298a420f9f Update RDB.ScheduleUnique with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
b1d717c842 Update RDB.Schedule with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
56e5762eea Update RDB.EnqueueUnique with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
5ec41e388b Update RDB.Enqueue with task state 2021-06-29 16:34:21 -07:00
Ken Hibino
9c95c41651 Change Server API
* Rename ServerStatus to ServerState internally

* Rename terminate to shutdown internally

* Update Scheduler API to match Server API
2021-06-29 16:34:21 -07:00
Ken Hibino
476812475e Change payload to byte slice 2021-06-29 16:34:21 -07:00
Ken Hibino
7af3981929 Refactor redis keys and store messages in protobuf
Changes:
- Task messages are stored under the "asynq:{<qname>}:t:<task_id>" key in Redis; the value is a HASH whose "msg" field holds the message. The hash also stores "deadline" and "timeout".
- Redis LIST and ZSET store task message IDs
- Task messages are serialized using protocol buffer
2021-06-29 16:34:21 -07:00
Ken Hibino
2516c4baba v0.17.2 2021-06-06 06:51:30 -07:00
Ken Hibino
ebe482a65c Free uniqueness lock when task is deleted 2021-06-06 06:48:59 -07:00
Vic Shóstak
3e9fc2f972 Update README 2021-04-28 10:25:34 -07:00
Vic Shóstak
63ce9ed0f9 Update README with a new logo 2021-04-14 10:21:47 -07:00
Ken Hibino
32d3f329b9 v0.17.1 2021-04-04 12:51:00 -07:00
Ken Hibino
544c301a8b Fix bug in RDB.memoryUsage 2021-04-04 12:49:19 -07:00
Ken Hibino
8b997d2fab v0.17.0 2021-03-24 16:51:59 -07:00
Ken Hibino
901105a8d7 Add dial, read, write timeout options to RedisConnOpt 2021-03-24 16:49:04 -07:00
Ken Hibino
aaa3f1d4fd v0.16.1 2021-03-20 06:27:03 -07:00
disc
4722ca2d3d Replaced blocking KEYS XXX:* command with non-blocking SCAN XXX:*
More details: https://redis.io/commands/KEYS
2021-03-20 06:24:08 -07:00
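The commit above swaps the blocking `KEYS` command for cursor-based `SCAN`; a minimal sketch of the SCAN iteration pattern with go-redis v8 follows (the key pattern is illustrative and not necessarily the exact one the library uses internally):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})

	// SCAN walks the keyspace in batches via a cursor, so it does not block
	// the server the way a single KEYS call over a large keyspace can.
	iter := rdb.Scan(ctx, 0, "asynq:*", 100).Iterator()
	for iter.Next(ctx) {
		fmt.Println(iter.Val())
	}
	if err := iter.Err(); err != nil {
		panic(err)
	}
}
```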
80 changed files with 13889 additions and 5065 deletions

.github/FUNDING.yml (new file, 12 lines)

@@ -0,0 +1,12 @@
# These are supported funding model platforms
github: [hibiken] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']


@@ -7,7 +7,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        go-version: [1.13.x, 1.14.x, 1.15.x, 1.16.x]
+        go-version: [1.14.x, 1.15.x, 1.16.x, 1.17.x]
     runs-on: ${{ matrix.os }}
     services:
       redis:
@@ -22,12 +22,21 @@
       with:
         go-version: ${{ matrix.go-version }}
-    - name: Build
+    - name: Build core module
       run: go build -v ./...
-    - name: Test
+    - name: Build x module
+      run: cd x && go build -v ./... && cd ..
+    - name: Build tools module
+      run: cd tools && go build -v ./... && cd ..
+    - name: Test core module
       run: go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
+    - name: Test x module
+      run: cd x && go test -race -v ./... && cd ..
     - name: Benchmark Test
       run: go test -run=^$ -bench=. -loglevel=debug ./...

.gitignore (10 lines changed)

@@ -1,3 +1,4 @@
+vendor
 # Binaries for programs and plugins
 *.exe
 *.exe~
@@ -14,8 +15,13 @@
 # Ignore examples for now
 /examples
-# Ignore command binary
+# Ignore tool binaries
 /tools/asynq/asynq
+/tools/metrics_exporter/metrics_exporter
 # Ignore asynq config file
 .asynq.*
+# Ignore editor config files
+.vscode
+.idea

CHANGELOG.md

@@ -7,6 +7,155 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.22.1] - 2022-02-20
### Fixed
- Fixed Redis version compatibility: Keep support for redis v4.0+
## [0.22.0] - 2022-02-19
### Added
- `BaseContext` is introduced in `Config` to specify callback hook to provide a base `context` from which `Handler` `context` is derived
- `IsOrphaned` field is added to `TaskInfo` to describe a task left in active state with no worker processing it.
### Changed
- `Server` now recovers tasks with an expired lease. Recovered tasks are retried/archived with `ErrLeaseExpired` error.
## [0.21.0] - 2022-01-22
### Added
- `PeriodicTaskManager` is added. Prefer using this over `Scheduler` as it has better support for dynamic periodic tasks.
- The `asynq stats` command now supports a `--json` option, making its output a JSON object
- Introduced new configuration for `DelayedTaskCheckInterval`. See [godoc](https://godoc.org/github.com/hibiken/asynq) for more details.
## [0.20.0] - 2021-12-19
### Added
- Package `x/metrics` is added.
- Tool `tools/metrics_exporter` binary is added.
- `ProcessedTotal` and `FailedTotal` fields were added to `QueueInfo` struct.
## [0.19.1] - 2021-12-12
### Added
- `Latency` field is added to `QueueInfo`.
- `EnqueueContext` method is added to `Client`.
### Fixed
- Fixed an error when user passes a duration less than 1s to the `Unique` option
## [0.19.0] - 2021-11-06
### Changed
- `NewTask` takes `Option` as variadic argument
- Bumped minimum supported go version to 1.14 (i.e. go1.14 or higher is required).
### Added
- `Retention` option is added to allow user to specify task retention duration after completion.
- `TaskID` option is added to allow user to specify task ID.
- `ErrTaskIDConflict` sentinel error value is added.
- `ResultWriter` type is added and provided through `Task.ResultWriter` method.
- `TaskInfo` has new fields `CompletedAt`, `Result` and `Retention`.
### Removed
- `Client.SetDefaultOptions` is removed. Use `NewTask` instead to pass default options for tasks.
## [0.18.6] - 2021-10-03
### Changed
- Updated `github.com/go-redis/redis` package to v8
## [0.18.5] - 2021-09-01
### Added
- `IsFailure` config option is added to determine whether error returned from Handler counts as a failure.
## [0.18.4] - 2021-08-17
### Fixed
- Scheduler methods are now thread-safe. It's now safe to call `Register` and `Unregister` concurrently.
## [0.18.3] - 2021-08-09
### Changed
- `Client.Enqueue` no longer enqueues tasks with empty typename; Error message is returned.
## [0.18.2] - 2021-07-15
### Changed
- Changed `Queue` function to not convert the provided queue name to lowercase. Queue names are now case-sensitive.
- `QueueInfo.MemoryUsage` is now an approximate usage value.
### Fixed
- Fixed latency issue around memory usage (see https://github.com/hibiken/asynq/issues/309).
## [0.18.1] - 2021-07-04
### Changed
- Changed to execute task recovering logic when server starts up; previously it needed to wait for a minute for the task recovering logic to execute.
### Fixed
- Fixed task recovering logic to execute every minute
## [0.18.0] - 2021-06-29
### Changed
- NewTask function now takes array of bytes as payload.
- Task `Type` and `Payload` should be accessed by a method call.
- `Server` API has changed. Renamed `Quiet` to `Stop`. Renamed `Stop` to `Shutdown`. _Note:_ As a result of this renaming, the behavior of `Stop` has changed. Please update the existing code to call `Shutdown` where it used to call `Stop`.
- `Scheduler` API has changed. Renamed `Stop` to `Shutdown`.
- Requires redis v4.0+ for multiple field/value pair support
- `Client.Enqueue` now returns `TaskInfo`
- `Inspector.RunTaskByKey` is replaced with `Inspector.RunTask`
- `Inspector.DeleteTaskByKey` is replaced with `Inspector.DeleteTask`
- `Inspector.ArchiveTaskByKey` is replaced with `Inspector.ArchiveTask`
- `inspeq` package is removed. All types and functions from the package are moved to the `asynq` package.
- `WorkerInfo` field names have changed.
- `Inspector.CancelActiveTask` is renamed to `Inspector.CancelProcessing`
## [0.17.2] - 2021-06-06
### Fixed
- Free unique lock when task is deleted (https://github.com/hibiken/asynq/issues/275).
## [0.17.1] - 2021-04-04
### Fixed
- Fix bug in internal `RDB.memoryUsage` method.
## [0.17.0] - 2021-03-24
### Added
- `DialTimeout`, `ReadTimeout`, and `WriteTimeout` options are added to `RedisConnOpt`.
## [0.16.1] - 2021-03-20
### Fixed
- Replace `KEYS` command with `SCAN` as recommended by [redis doc](https://redis.io/commands/KEYS).
## [0.16.0] - 2021-03-10
### Added
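The [0.22.0] entry above introduces `BaseContext`; assuming it is a `func() context.Context` field on `Config`, as the entry describes, a minimal sketch could look like the following (the context key and value are made up for the example):

```go
package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

type ctxKey string

func main() {
	// Every Handler context is derived from the context returned by
	// BaseContext, so values attached here become visible to all handlers.
	base := context.WithValue(context.Background(), ctxKey("app.env"), "staging")

	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "127.0.0.1:6379"},
		asynq.Config{
			Concurrency: 5,
			BaseContext: func() context.Context { return base },
		},
	)

	mux := asynq.NewServeMux()
	if err := srv.Run(mux); err != nil {
		log.Fatalf("could not run server: %v", err)
	}
}
```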

CODE_OF_CONDUCT.md (new file, 128 lines)

@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
ken.hibino7@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

Makefile (new file, 7 lines)

@@ -0,0 +1,7 @@
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))

proto: internal/proto/asynq.proto
	protoc -I=$(ROOT_DIR)/internal/proto \
		--go_out=$(ROOT_DIR)/internal/proto \
		--go_opt=module=github.com/hibiken/asynq/internal/proto \
		$(ROOT_DIR)/internal/proto/asynq.proto

README.md (239 lines changed)

@@ -1,41 +1,35 @@
# Asynq <img src="https://user-images.githubusercontent.com/11155743/114697792-ffbfa580-9d26-11eb-8e5b-33bef69476dc.png" alt="Asynq logo" width="360px" />
# Simple, reliable & efficient distributed task queue in Go
![Build Status](https://github.com/hibiken/asynq/workflows/build/badge.svg)
[![GoDoc](https://godoc.org/github.com/hibiken/asynq?status.svg)](https://godoc.org/github.com/hibiken/asynq) [![GoDoc](https://godoc.org/github.com/hibiken/asynq?status.svg)](https://godoc.org/github.com/hibiken/asynq)
[![Go Report Card](https://goreportcard.com/badge/github.com/hibiken/asynq)](https://goreportcard.com/report/github.com/hibiken/asynq) [![Go Report Card](https://goreportcard.com/badge/github.com/hibiken/asynq)](https://goreportcard.com/report/github.com/hibiken/asynq)
![Build Status](https://github.com/hibiken/asynq/workflows/build/badge.svg)
[![License: MIT](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT) [![License: MIT](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT)
[![Gitter chat](https://badges.gitter.im/go-asynq/gitter.svg)](https://gitter.im/go-asynq/community) [![Gitter chat](https://badges.gitter.im/go-asynq/gitter.svg)](https://gitter.im/go-asynq/community)
## Overview Asynq is a Go library for queueing tasks and processing them asynchronously with workers. It's backed by [Redis](https://redis.io/) and is designed to be scalable yet easy to get started.
Asynq is a Go library for queueing tasks and processing them asynchronously with workers. It's backed by Redis and is designed to be scalable yet easy to get started.
Highlevel overview of how Asynq works: Highlevel overview of how Asynq works:
- Client puts task on a queue - Client puts tasks on a queue
- Server pulls task off queues and starts a worker goroutine for each task - Server pulls tasks off queues and starts a worker goroutine for each task
- Tasks are processed concurrently by multiple workers - Tasks are processed concurrently by multiple workers
Task queues are used as a mechanism to distribute work across multiple machines. Task queues are used as a mechanism to distribute work across multiple machines. A system can consist of multiple worker servers and brokers, giving way to high availability and horizontal scaling.
A system can consist of multiple worker servers and brokers, giving way to high availability and horizontal scaling.
![Task Queue Diagram](/docs/assets/overview.png) **Example use case**
## Stability and Compatibility ![Task Queue Diagram](https://user-images.githubusercontent.com/11155743/116358505-656f5f80-a806-11eb-9c16-94e49dab0f99.jpg)
**Important Note**: Current major version is zero (v0.x.x) to accomodate rapid development and fast iteration while getting early feedback from users (Feedback on APIs are appreciated!). The public API could change without a major version update before v1.0.0 release.
**Status**: The library is currently undergoing heavy development with frequent, breaking API changes.
## Features ## Features
- Guaranteed [at least one execution](https://www.cloudcomputingpatterns.org/at_least_once_delivery/) of a task - Guaranteed [at least one execution](https://www.cloudcomputingpatterns.org/at_least_once_delivery/) of a task
- Scheduling of tasks - Scheduling of tasks
- Durability since tasks are written to Redis
- [Retries](https://github.com/hibiken/asynq/wiki/Task-Retry) of failed tasks - [Retries](https://github.com/hibiken/asynq/wiki/Task-Retry) of failed tasks
- Automatic recovery of tasks in the event of a worker crash - Automatic recovery of tasks in the event of a worker crash
- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#weighted-priority-queues) - [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#weighted-priority)
- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#strict-priority-queues) - [Strict priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#strict-priority)
- Low latency to add a task since writes are fast in Redis - Low latency to add a task since writes are fast in Redis
- De-duplication of tasks using [unique option](https://github.com/hibiken/asynq/wiki/Unique-Tasks) - De-duplication of tasks using [unique option](https://github.com/hibiken/asynq/wiki/Unique-Tasks)
- Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation) - Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)
@@ -44,25 +38,39 @@ A system can consist of multiple worker servers and brokers, giving way to high
- [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks) - [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks)
- [Support Redis Cluster](https://github.com/hibiken/asynq/wiki/Redis-Cluster) for automatic sharding and high availability - [Support Redis Cluster](https://github.com/hibiken/asynq/wiki/Redis-Cluster) for automatic sharding and high availability
- [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for high availability - [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for high availability
- Integration with [Prometheus](https://prometheus.io/) to collect and visualize queue metrics
- [Web UI](#web-ui) to inspect and remote-control queues and tasks - [Web UI](#web-ui) to inspect and remote-control queues and tasks
- [CLI](#command-line-tool) to inspect and remote-control queues and tasks - [CLI](#command-line-tool) to inspect and remote-control queues and tasks
## Stability and Compatibility
**Status**: The library is currently undergoing **heavy development** with frequent, breaking API changes.
> ☝️ **Important Note**: Current major version is zero (`v0.x.x`) to accommodate rapid development and fast iteration while getting early feedback from users (_feedback on APIs are appreciated!_). The public API could change without a major version update before `v1.0.0` release.
## Quickstart ## Quickstart
First, make sure you are running a Redis server locally. Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.14` or higher is required.
Initialize your project by creating a folder and then running `go mod init github.com/your/repo` ([learn more](https://blog.golang.org/using-go-modules)) inside the folder. Then install Asynq library with the [`go get`](https://golang.org/cmd/go/#hdr-Add_dependencies_to_current_module_and_install_them) command:
```sh ```sh
$ redis-server go get -u github.com/hibiken/asynq
``` ```
Make sure you're running a Redis server locally or from a [Docker](https://hub.docker.com/_/redis) container. Version `4.0` or higher is required.
Next, write a package that encapsulates task creation and task handling. Next, write a package that encapsulates task creation and task handling.
```go ```go
package tasks package tasks
import ( import (
"context"
"encoding/json"
"fmt" "fmt"
"log"
"time"
"github.com/hibiken/asynq" "github.com/hibiken/asynq"
) )
@@ -72,19 +80,35 @@ const (
TypeImageResize = "image:resize" TypeImageResize = "image:resize"
) )
type EmailDeliveryPayload struct {
UserID int
TemplateID string
}
type ImageResizePayload struct {
SourceURL string
}
//---------------------------------------------- //----------------------------------------------
// Write a function NewXXXTask to create a task. // Write a function NewXXXTask to create a task.
// A task consists of a type and a payload. // A task consists of a type and a payload.
//---------------------------------------------- //----------------------------------------------
func NewEmailDeliveryTask(userID int, tmplID string) *asynq.Task { func NewEmailDeliveryTask(userID int, tmplID string) (*asynq.Task, error) {
payload := map[string]interface{}{"user_id": userID, "template_id": tmplID} payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: tmplID})
return asynq.NewTask(TypeEmailDelivery, payload) if err != nil {
return nil, err
}
return asynq.NewTask(TypeEmailDelivery, payload), nil
} }
func NewImageResizeTask(src string) *asynq.Task { func NewImageResizeTask(src string) (*asynq.Task, error) {
payload := map[string]interface{}{"src": src} payload, err := json.Marshal(ImageResizePayload{SourceURL: src})
return asynq.NewTask(TypeImageResize, payload) if err != nil {
return nil, err
}
// task options can be passed to NewTask, which can be overridden at enqueue time.
return asynq.NewTask(TypeImageResize, payload, asynq.MaxRetry(5), asynq.Timeout(20 * time.Minute)), nil
} }
//--------------------------------------------------------------- //---------------------------------------------------------------
@@ -96,15 +120,11 @@ func NewImageResizeTask(src string) *asynq.Task {
//--------------------------------------------------------------- //---------------------------------------------------------------
func HandleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error { func HandleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error {
userID, err := t.Payload.GetInt("user_id") var p EmailDeliveryPayload
if err != nil { if err := json.Unmarshal(t.Payload(), &p); err != nil {
return err return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
} }
tmplID, err := t.Payload.GetString("template_id") log.Printf("Sending Email to User: user_id=%d, template_id=%s", p.UserID, p.TemplateID)
if err != nil {
return err
}
fmt.Printf("Send Email to User: user_id = %d, template_id = %s\n", userID, tmplID)
// Email delivery code ... // Email delivery code ...
return nil return nil
} }
@@ -114,28 +134,27 @@ type ImageProcessor struct {
// ... fields for struct // ... fields for struct
} }
func (p *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error { func (processor *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
src, err := t.Payload.GetString("src") var p ImageResizePayload
if err != nil { if err := json.Unmarshal(t.Payload(), &p); err != nil {
return err return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
} }
fmt.Printf("Resize image: src = %s\n", src) log.Printf("Resizing image: src=%s", p.SourceURL)
// Image resizing code ... // Image resizing code ...
return nil return nil
} }
func NewImageProcessor() *ImageProcessor { func NewImageProcessor() *ImageProcessor {
// ... return an instance return &ImageProcessor{}
} }
``` ```
In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on the queue. In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on queues.
```go ```go
package main package main
import ( import (
"fmt"
"log" "log"
"time" "time"
@@ -146,21 +165,23 @@ import (
const redisAddr = "127.0.0.1:6379" const redisAddr = "127.0.0.1:6379"
func main() { func main() {
r := asynq.RedisClientOpt{Addr: redisAddr} client := asynq.NewClient(asynq.RedisClientOpt{Addr: redisAddr})
c := asynq.NewClient(r) defer client.Close()
defer c.Close()
// ------------------------------------------------------ // ------------------------------------------------------
// Example 1: Enqueue task to be processed immediately. // Example 1: Enqueue task to be processed immediately.
// Use (*Client).Enqueue method. // Use (*Client).Enqueue method.
// ------------------------------------------------------ // ------------------------------------------------------
t := tasks.NewEmailDeliveryTask(42, "some:template:id") task, err := tasks.NewEmailDeliveryTask(42, "some:template:id")
res, err := c.Enqueue(t)
if err != nil { if err != nil {
log.Fatal("could not enqueue task: %v", err) log.Fatalf("could not create task: %v", err)
} }
fmt.Printf("Enqueued Result: %+v\n", res) info, err := client.Enqueue(task)
if err != nil {
log.Fatalf("could not enqueue task: %v", err)
}
log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -168,12 +189,11 @@ func main() {
// Use ProcessIn or ProcessAt option. // Use ProcessIn or ProcessAt option.
// ------------------------------------------------------------ // ------------------------------------------------------------
t = tasks.NewEmailDeliveryTask(42, "other:template:id") info, err = client.Enqueue(task, asynq.ProcessIn(24*time.Hour))
res, err = c.Enqueue(t, asynq.ProcessIn(24*time.Hour))
if err != nil { if err != nil {
log.Fatal("could not schedule task: %v", err) log.Fatalf("could not schedule task: %v", err)
} }
fmt.Printf("Enqueued Result: %+v\n", res) log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
@@ -181,33 +201,21 @@ func main() {
// Options include MaxRetry, Queue, Timeout, Deadline, Unique etc. // Options include MaxRetry, Queue, Timeout, Deadline, Unique etc.
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
c.SetDefaultOptions(tasks.TypeImageResize, asynq.MaxRetry(10), asynq.Timeout(3*time.Minute)) task, err = tasks.NewImageResizeTask("https://example.com/myassets/image.jpg")
t = tasks.NewImageResizeTask("some/blobstore/path")
res, err = c.Enqueue(t)
if err != nil { if err != nil {
log.Fatal("could not enqueue task: %v", err) log.Fatalf("could not create task: %v", err)
} }
fmt.Printf("Enqueued Result: %+v\n", res) info, err = client.Enqueue(task, asynq.MaxRetry(10), asynq.Timeout(3 * time.Minute))
// ---------------------------------------------------------------------------
// Example 4: Pass options to tune task processing behavior at enqueue time.
// Options passed at enqueue time override default ones, if any.
// ---------------------------------------------------------------------------
t = tasks.NewImageResizeTask("some/blobstore/path")
res, err = c.Enqueue(t, asynq.Queue("critical"), asynq.Timeout(30*time.Second))
if err != nil { if err != nil {
log.Fatal("could not enqueue task: %v", err) log.Fatalf("could not enqueue task: %v", err)
} }
fmt.Printf("Enqueued Result: %+v\n", res) log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
} }
``` ```
Next, start a worker server to process these tasks in the background. Next, start a worker server to process these tasks in the background. To start the background workers, use [`Server`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Server) and provide your [`Handler`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Handler) to process the tasks.
To start the background workers, use [`Server`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Server) and provide your [`Handler`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Handler) to process the tasks.
You can optionally use [`ServeMux`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#ServeMux) to create a handler, just as you would with [`"net/http"`](https://golang.org/pkg/net/http/) Handler. You can optionally use [`ServeMux`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#ServeMux) to create a handler, just as you would with [`net/http`](https://golang.org/pkg/net/http/) Handler.
```go ```go
package main package main
@@ -222,19 +230,20 @@ import (
const redisAddr = "127.0.0.1:6379" const redisAddr = "127.0.0.1:6379"
func main() { func main() {
r := asynq.RedisClientOpt{Addr: redisAddr} srv := asynq.NewServer(
asynq.RedisClientOpt{Addr: redisAddr},
srv := asynq.NewServer(r, asynq.Config{ asynq.Config{
// Specify how many concurrent workers to use // Specify how many concurrent workers to use
Concurrency: 10, Concurrency: 10,
// Optionally specify multiple queues with different priority. // Optionally specify multiple queues with different priority.
Queues: map[string]int{ Queues: map[string]int{
"critical": 6, "critical": 6,
"default": 3, "default": 3,
"low": 1, "low": 1,
},
// See the godoc for other configuration options
}, },
// See the godoc for other configuration options )
})
// mux maps a type to a handler // mux maps a type to a handler
mux := asynq.NewServeMux() mux := asynq.NewServeMux()
@@ -248,65 +257,55 @@ func main() {
} }
``` ```
For a more detailed walk-through of the library, see our [Getting Started Guide](https://github.com/hibiken/asynq/wiki/Getting-Started). For a more detailed walk-through of the library, see our [Getting Started](https://github.com/hibiken/asynq/wiki/Getting-Started) guide.
To Learn more about `asynq` features and APIs, see our [Wiki](https://github.com/hibiken/asynq/wiki) and [godoc](https://godoc.org/github.com/hibiken/asynq). To learn more about `asynq` features and APIs, see the package [godoc](https://godoc.org/github.com/hibiken/asynq).
## Web UI ## Web UI
[Asynqmon](https://github.com/hibiken/asynqmon) is a web based tool for monitoring and administrating Asynq queues and tasks. [Asynqmon](https://github.com/hibiken/asynqmon) is a web based tool for monitoring and administrating Asynq queues and tasks.
Please see the tool's [README](https://github.com/hibiken/asynqmon) for details.
Here's a few screenshots of the web UI. Here's a few screenshots of the Web UI:
**Queues view** **Queues view**
![Web UI QueuesView](/docs/assets/asynqmon-queues-view.png)
**Tasks view** ![Web UI Queues View](https://user-images.githubusercontent.com/11155743/114697016-07327f00-9d26-11eb-808c-0ac841dc888e.png)
![Web UI TasksView](/docs/assets/asynqmon-task-view.png)
**Tasks view**
![Web UI TasksView](https://user-images.githubusercontent.com/11155743/114697070-1f0a0300-9d26-11eb-855c-d3ec263865b7.png)
**Metrics view**
<img width="1532" alt="Screen Shot 2021-12-19 at 4 37 19 PM" src="https://user-images.githubusercontent.com/10953044/146777420-cae6c476-bac6-469c-acce-b2f6584e8707.png">
**Settings and adaptive dark mode**
![Web UI Settings and adaptive dark mode](https://user-images.githubusercontent.com/11155743/114697149-3517c380-9d26-11eb-9f7a-ae2dd00aad5b.png)
For details on how to use the tool, refer to the tool's [README](https://github.com/hibiken/asynqmon#readme).
## Command Line Tool ## Command Line Tool
Asynq ships with a command line tool to inspect the state of queues and tasks. Asynq ships with a command line tool to inspect the state of queues and tasks.
Here's an example of running the `stats` command.
![Gif](/docs/assets/demo.gif)
For details on how to use the tool, refer to the tool's [README](/tools/asynq/README.md).
## Installation
To install `asynq` library, run the following command:
```sh
go get -u github.com/hibiken/asynq
```
To install the CLI tool, run the following command: To install the CLI tool, run the following command:
```sh ```sh
go get -u github.com/hibiken/asynq/tools/asynq go get -u github.com/hibiken/asynq/tools/asynq
``` ```
## Requirements Here's an example of running the `asynq stats` command:
| Dependency | Version | ![Gif](/docs/assets/demo.gif)
| -------------------------- | ------- |
| [Redis](https://redis.io/) | v3.0+ | For details on how to use the tool, refer to the tool's [README](/tools/asynq/README.md).
| [Go](https://golang.org/) | v1.13+ |
## Contributing ## Contributing
We are open to, and grateful for, any contributions (Github issues/pull-requests, feedback on Gitter channel, etc) made by the community. We are open to, and grateful for, any contributions (GitHub issues/PRs, feedback on [Gitter channel](https://gitter.im/go-asynq/community), etc) made by the community.
Please see the [Contribution Guide](/CONTRIBUTING.md) before contributing. Please see the [Contribution Guide](/CONTRIBUTING.md) before contributing.
## Acknowledgements
- [Sidekiq](https://github.com/mperham/sidekiq) : Many of the design ideas are taken from sidekiq and its Web UI
- [RQ](https://github.com/rq/rq) : Client APIs are inspired by rq library.
- [Cobra](https://github.com/spf13/cobra) : Asynq CLI is built with cobra
## License ## License
Asynq is released under the MIT license. See [LICENSE](https://github.com/hibiken/asynq/blob/master/LICENSE). Copyright (c) 2019-present [Ken Hibino](https://github.com/hibiken) and [Contributors](https://github.com/hibiken/asynq/graphs/contributors). `Asynq` is free and open-source software licensed under the [MIT License](https://github.com/hibiken/asynq/blob/master/LICENSE). Official logo was created by [Vic Shóstak](https://github.com/koddr) and distributed under [Creative Commons](https://creativecommons.org/publicdomain/zero/1.0/) license (CC0 1.0 Universal).

asynq.go (302 lines changed)

@@ -5,34 +5,209 @@
package asynq package asynq
import ( import (
"context"
"crypto/tls" "crypto/tls"
"fmt" "fmt"
"net/url" "net/url"
"strconv" "strconv"
"strings" "strings"
"time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
) )
// Task represents a unit of work to be performed. // Task represents a unit of work to be performed.
type Task struct { type Task struct {
// Type indicates the type of task to be performed. // typename indicates the type of task to be performed.
Type string typename string
// Payload holds data needed to perform the task. // payload holds data needed to perform the task.
Payload Payload payload []byte
// opts holds options for the task.
opts []Option
// w is the ResultWriter for the task.
w *ResultWriter
} }
// NewTask returns a new Task given a type name and payload data. func (t *Task) Type() string { return t.typename }
func (t *Task) Payload() []byte { return t.payload }
// ResultWriter returns a pointer to the ResultWriter associated with the task.
// //
// The payload values must be serializable. // Nil pointer is returned if called on a newly created task (i.e. task created by calling NewTask).
func NewTask(typename string, payload map[string]interface{}) *Task { // Only the tasks passed to Handler.ProcessTask have a valid ResultWriter pointer.
func (t *Task) ResultWriter() *ResultWriter { return t.w }
// NewTask returns a new Task given a type name and payload data.
// Options can be passed to configure task processing behavior.
func NewTask(typename string, payload []byte, opts ...Option) *Task {
return &Task{ return &Task{
Type: typename, typename: typename,
Payload: Payload{payload}, payload: payload,
opts: opts,
} }
} }
// newTask creates a task with the given typename, payload and ResultWriter.
func newTask(typename string, payload []byte, w *ResultWriter) *Task {
return &Task{
typename: typename,
payload: payload,
w: w,
}
}
// A TaskInfo describes a task and its metadata.
type TaskInfo struct {
// ID is the identifier of the task.
ID string
// Queue is the name of the queue in which the task belongs.
Queue string
// Type is the type name of the task.
Type string
// Payload is the payload data of the task.
Payload []byte
// State indicates the task state.
State TaskState
// MaxRetry is the maximum number of times the task can be retried.
MaxRetry int
// Retried is the number of times the task has retried so far.
Retried int
// LastErr is the error message from the last failure.
LastErr string
// LastFailedAt is the time of the last failure, if any.
// If the task has no failures, LastFailedAt is zero time (i.e. time.Time{}).
LastFailedAt time.Time
// Timeout is the duration the task can be processed by Handler before being retried,
// zero if not specified
Timeout time.Duration
// Deadline is the deadline for the task, zero value if not specified.
Deadline time.Time
// NextProcessAt is the time the task is scheduled to be processed,
// zero if not applicable.
NextProcessAt time.Time
// IsOrphaned describes whether the task is left in active state with no worker processing it.
// An orphaned task indicates that the worker has crashed or experienced network failures and was not able to
// extend its lease on the task.
//
// This task will be recovered by running a server against the queue the task is in.
// This field is only applicable to tasks with TaskStateActive.
IsOrphaned bool
// Retention is duration of the retention period after the task is successfully processed.
Retention time.Duration
// CompletedAt is the time when the task is processed successfully.
// Zero value (i.e. time.Time{}) indicates no value.
CompletedAt time.Time
// Result holds the result data associated with the task.
// Use ResultWriter to write result data from the Handler.
Result []byte
}
// If t is non-zero, returns time converted from t as unix time in seconds.
// If t is zero, returns zero value of time.Time.
func fromUnixTimeOrZero(t int64) time.Time {
if t == 0 {
return time.Time{}
}
return time.Unix(t, 0)
}
func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time, result []byte) *TaskInfo {
info := TaskInfo{
ID: msg.ID,
Queue: msg.Queue,
Type: msg.Type,
Payload: msg.Payload, // Do we need to make a copy?
MaxRetry: msg.Retry,
Retried: msg.Retried,
LastErr: msg.ErrorMsg,
Timeout: time.Duration(msg.Timeout) * time.Second,
Deadline: fromUnixTimeOrZero(msg.Deadline),
Retention: time.Duration(msg.Retention) * time.Second,
NextProcessAt: nextProcessAt,
LastFailedAt: fromUnixTimeOrZero(msg.LastFailedAt),
CompletedAt: fromUnixTimeOrZero(msg.CompletedAt),
Result: result,
}
switch state {
case base.TaskStateActive:
info.State = TaskStateActive
case base.TaskStatePending:
info.State = TaskStatePending
case base.TaskStateScheduled:
info.State = TaskStateScheduled
case base.TaskStateRetry:
info.State = TaskStateRetry
case base.TaskStateArchived:
info.State = TaskStateArchived
case base.TaskStateCompleted:
info.State = TaskStateCompleted
default:
panic(fmt.Sprintf("internal error: unknown state: %d", state))
}
return &info
}
// TaskState denotes the state of a task.
type TaskState int
const (
// Indicates that the task is currently being processed by Handler.
TaskStateActive TaskState = iota + 1
// Indicates that the task is ready to be processed by Handler.
TaskStatePending
// Indicates that the task is scheduled to be processed some time in the future.
TaskStateScheduled
// Indicates that the task has previously failed and scheduled to be processed some time in the future.
TaskStateRetry
// Indicates that the task is archived and stored for inspection purposes.
TaskStateArchived
// Indicates that the task is processed successfully and retained until the retention TTL expires.
TaskStateCompleted
)
func (s TaskState) String() string {
switch s {
case TaskStateActive:
return "active"
case TaskStatePending:
return "pending"
case TaskStateScheduled:
return "scheduled"
case TaskStateRetry:
return "retry"
case TaskStateArchived:
return "archived"
case TaskStateCompleted:
return "completed"
}
panic("asynq: unknown task state")
}
// RedisConnOpt is a discriminated union of types that represent Redis connection configuration option. // RedisConnOpt is a discriminated union of types that represent Redis connection configuration option.
// //
// RedisConnOpt represents a sum of following types: // RedisConnOpt represents a sum of following types:
@@ -68,6 +243,26 @@ type RedisClientOpt struct {
// See: https://redis.io/commands/select. // See: https://redis.io/commands/select.
DB int DB int
// Dial timeout for establishing new connections.
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads.
// If timeout is reached, read commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is 3 seconds.
ReadTimeout time.Duration
// Timeout for socket writes.
// If timeout is reached, write commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is ReadTimeout.
WriteTimeout time.Duration
// Maximum number of socket connections. // Maximum number of socket connections.
// Default is 10 connections per every CPU as reported by runtime.NumCPU. // Default is 10 connections per every CPU as reported by runtime.NumCPU.
PoolSize int PoolSize int
@@ -79,13 +274,16 @@ type RedisClientOpt struct {
func (opt RedisClientOpt) MakeRedisClient() interface{} { func (opt RedisClientOpt) MakeRedisClient() interface{} {
return redis.NewClient(&redis.Options{ return redis.NewClient(&redis.Options{
Network: opt.Network, Network: opt.Network,
Addr: opt.Addr, Addr: opt.Addr,
Username: opt.Username, Username: opt.Username,
Password: opt.Password, Password: opt.Password,
DB: opt.DB, DB: opt.DB,
PoolSize: opt.PoolSize, DialTimeout: opt.DialTimeout,
TLSConfig: opt.TLSConfig, ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.PoolSize,
TLSConfig: opt.TLSConfig,
}) })
} }
@@ -116,6 +314,26 @@ type RedisFailoverClientOpt struct {
// See: https://redis.io/commands/select. // See: https://redis.io/commands/select.
DB int DB int
// Dial timeout for establishing new connections.
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads.
// If timeout is reached, read commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is 3 seconds.
ReadTimeout time.Duration
// Timeout for socket writes.
// If timeout is reached, write commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is ReadTimeout.
// Default is ReadTimeout.
WriteTimeout time.Duration
// Maximum number of socket connections. // Maximum number of socket connections.
// Default is 10 connections per every CPU as reported by runtime.NumCPU. // Default is 10 connections per every CPU as reported by runtime.NumCPU.
PoolSize int PoolSize int
@@ -133,6 +351,9 @@ func (opt RedisFailoverClientOpt) MakeRedisClient() interface{} {
Username: opt.Username, Username: opt.Username,
Password: opt.Password, Password: opt.Password,
DB: opt.DB, DB: opt.DB,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.PoolSize, PoolSize: opt.PoolSize,
TLSConfig: opt.TLSConfig, TLSConfig: opt.TLSConfig,
}) })
@@ -157,6 +378,26 @@ type RedisClusterClientOpt struct {
// See: https://redis.io/commands/auth. // See: https://redis.io/commands/auth.
Password string Password string
// Dial timeout for establishing new connections.
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads.
// If timeout is reached, read commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is 3 seconds.
ReadTimeout time.Duration
// Timeout for socket writes.
// If timeout is reached, write commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is ReadTimeout.
WriteTimeout time.Duration
// TLS Config used to connect to a server. // TLS Config used to connect to a server.
// TLS will be negotiated only if this field is set. // TLS will be negotiated only if this field is set.
TLSConfig *tls.Config TLSConfig *tls.Config
@@ -168,6 +409,9 @@ func (opt RedisClusterClientOpt) MakeRedisClient() interface{} {
MaxRedirects: opt.MaxRedirects, MaxRedirects: opt.MaxRedirects,
Username: opt.Username, Username: opt.Username,
Password: opt.Password, Password: opt.Password,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
TLSConfig: opt.TLSConfig, TLSConfig: opt.TLSConfig,
}) })
} }
@@ -244,3 +488,27 @@ func parseRedisSentinelURI(u *url.URL) (RedisConnOpt, error) {
} }
return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, Password: password}, nil return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, Password: password}, nil
} }
// ResultWriter is a client interface to write result data for a task.
// It writes the data to the redis instance the server is connected to.
type ResultWriter struct {
id string // task ID this writer is responsible for
qname string // queue name the task belongs to
broker base.Broker
ctx context.Context // context associated with the task
}
// Write writes the given data as a result of the task the ResultWriter is associated with.
func (w *ResultWriter) Write(data []byte) (n int, err error) {
select {
case <-w.ctx.Done():
return 0, fmt.Errorf("failed to result task result: %v", w.ctx.Err())
default:
}
return w.broker.WriteResult(w.qname, w.id, data)
}
// TaskID returns the ID of the task the ResultWriter is associated with.
func (w *ResultWriter) TaskID() string {
return w.id
}

View File

@@ -10,7 +10,7 @@ import (
"strings" "strings"
"testing" "testing"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
h "github.com/hibiken/asynq/internal/asynqtest" h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
@@ -85,7 +85,7 @@ func getRedisConnOpt(tb testing.TB) RedisConnOpt {
var sortTaskOpt = cmp.Transformer("SortMsg", func(in []*Task) []*Task { var sortTaskOpt = cmp.Transformer("SortMsg", func(in []*Task) []*Task {
out := append([]*Task(nil), in...) // Copy input to avoid mutating it out := append([]*Task(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool { sort.Slice(out, func(i, j int) bool {
return out[i].Type < out[j].Type return out[i].Type() < out[j].Type()
}) })
return out return out
}) })

View File

@@ -6,12 +6,24 @@ package asynq
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"sync" "sync"
"testing" "testing"
"time" "time"
h "github.com/hibiken/asynq/internal/asynqtest"
) )
// Creates a new task of type "task<n>" with payload {"data": n}.
func makeTask(n int) *Task {
b, err := json.Marshal(map[string]int{"data": n})
if err != nil {
panic(err)
}
return NewTask(fmt.Sprintf("task%d", n), b)
}
// Simple E2E Benchmark testing with no scheduled tasks and retries. // Simple E2E Benchmark testing with no scheduled tasks and retries.
func BenchmarkEndToEndSimple(b *testing.B) { func BenchmarkEndToEndSimple(b *testing.B) {
const count = 100000 const count = 100000
@@ -29,8 +41,7 @@ func BenchmarkEndToEndSimple(b *testing.B) {
}) })
// Create a bunch of tasks // Create a bunch of tasks
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i}) if _, err := client.Enqueue(makeTask(i)); err != nil {
if _, err := client.Enqueue(t); err != nil {
b.Fatalf("could not enqueue a task: %v", err) b.Fatalf("could not enqueue a task: %v", err)
} }
} }
@@ -70,14 +81,12 @@ func BenchmarkEndToEnd(b *testing.B) {
}) })
// Create a bunch of tasks // Create a bunch of tasks
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i}) if _, err := client.Enqueue(makeTask(i)); err != nil {
if _, err := client.Enqueue(t); err != nil {
b.Fatalf("could not enqueue a task: %v", err) b.Fatalf("could not enqueue a task: %v", err)
} }
} }
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
t := NewTask(fmt.Sprintf("scheduled%d", i), map[string]interface{}{"data": i}) if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil {
if _, err := client.Enqueue(t, ProcessIn(1*time.Second)); err != nil {
b.Fatalf("could not enqueue a task: %v", err) b.Fatalf("could not enqueue a task: %v", err)
} }
} }
@@ -86,13 +95,18 @@ func BenchmarkEndToEnd(b *testing.B) {
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(count * 2) wg.Add(count * 2)
handler := func(ctx context.Context, t *Task) error { handler := func(ctx context.Context, t *Task) error {
n, err := t.Payload.GetInt("data") var p map[string]int
if err != nil { if err := json.Unmarshal(t.Payload(), &p); err != nil {
b.Logf("internal error: %v", err) b.Logf("internal error: %v", err)
} }
n, ok := p["data"]
if !ok {
n = 1
b.Logf("internal error: could not get data from payload")
}
retried, ok := GetRetryCount(ctx) retried, ok := GetRetryCount(ctx)
if !ok { if !ok {
b.Logf("internal error: %v", err) b.Logf("internal error: could not get retry count from context")
} }
// Fail 1% of tasks for the first attempt. // Fail 1% of tasks for the first attempt.
if retried == 0 && n%100 == 0 { if retried == 0 && n%100 == 0 {
@@ -136,20 +150,17 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
}) })
// Create a bunch of tasks // Create a bunch of tasks
for i := 0; i < highCount; i++ { for i := 0; i < highCount; i++ {
t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i}) if _, err := client.Enqueue(makeTask(i), Queue("high")); err != nil {
if _, err := client.Enqueue(t, Queue("high")); err != nil {
b.Fatalf("could not enqueue a task: %v", err) b.Fatalf("could not enqueue a task: %v", err)
} }
} }
for i := 0; i < defaultCount; i++ { for i := 0; i < defaultCount; i++ {
t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i}) if _, err := client.Enqueue(makeTask(i)); err != nil {
if _, err := client.Enqueue(t); err != nil {
b.Fatalf("could not enqueue a task: %v", err) b.Fatalf("could not enqueue a task: %v", err)
} }
} }
for i := 0; i < lowCount; i++ { for i := 0; i < lowCount; i++ {
t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i}) if _, err := client.Enqueue(makeTask(i), Queue("low")); err != nil {
if _, err := client.Enqueue(t, Queue("low")); err != nil {
b.Fatalf("could not enqueue a task: %v", err) b.Fatalf("could not enqueue a task: %v", err)
} }
} }
@@ -190,15 +201,13 @@ func BenchmarkClientWhileServerRunning(b *testing.B) {
}) })
// Enqueue 10,000 tasks. // Enqueue 10,000 tasks.
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i}) if _, err := client.Enqueue(makeTask(i)); err != nil {
if _, err := client.Enqueue(t); err != nil {
b.Fatalf("could not enqueue a task: %v", err) b.Fatalf("could not enqueue a task: %v", err)
} }
} }
// Schedule 10,000 tasks. // Schedule 10,000 tasks.
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
t := NewTask(fmt.Sprintf("scheduled%d", i), map[string]interface{}{"data": i}) if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil {
if _, err := client.Enqueue(t, ProcessIn(1*time.Second)); err != nil {
b.Fatalf("could not enqueue a task: %v", err) b.Fatalf("could not enqueue a task: %v", err)
} }
} }
@@ -213,7 +222,7 @@ func BenchmarkClientWhileServerRunning(b *testing.B) {
b.Log("Starting enqueueing") b.Log("Starting enqueueing")
enqueued := 0 enqueued := 0
for enqueued < 100000 { for enqueued < 100000 {
t := NewTask(fmt.Sprintf("enqueued%d", enqueued), map[string]interface{}{"data": enqueued}) t := NewTask(fmt.Sprintf("enqueued%d", enqueued), h.JSON(map[string]interface{}{"data": enqueued}))
if _, err := client.Enqueue(t); err != nil { if _, err := client.Enqueue(t); err != nil {
b.Logf("could not enqueue task %d: %v", enqueued, err) b.Logf("could not enqueue task %d: %v", enqueued, err)
continue continue

204
client.go
View File

@@ -5,15 +5,15 @@
package asynq package asynq
import ( import (
"errors" "context"
"fmt" "fmt"
"strings" "strings"
"sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
) )
@@ -24,9 +24,7 @@ import (
// //
// Clients are safe for concurrent use by multiple goroutines. // Clients are safe for concurrent use by multiple goroutines.
type Client struct { type Client struct {
mu sync.Mutex rdb *rdb.RDB
opts map[string][]Option
rdb *rdb.RDB
} }
// NewClient returns a new Client instance given a redis connection option. // NewClient returns a new Client instance given a redis connection option.
@@ -35,11 +33,7 @@ func NewClient(r RedisConnOpt) *Client {
if !ok { if !ok {
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r)) panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
} }
rdb := rdb.NewRDB(c) return &Client{rdb: rdb.NewRDB(c)}
return &Client{
opts: make(map[string][]Option),
rdb: rdb,
}
} }
type OptionType int type OptionType int
@@ -52,6 +46,8 @@ const (
UniqueOpt UniqueOpt
ProcessAtOpt ProcessAtOpt
ProcessInOpt ProcessInOpt
TaskIDOpt
RetentionOpt
) )
// Option specifies the task processing behavior. // Option specifies the task processing behavior.
@@ -70,11 +66,13 @@ type Option interface {
type ( type (
retryOption int retryOption int
queueOption string queueOption string
taskIDOption string
timeoutOption time.Duration timeoutOption time.Duration
deadlineOption time.Time deadlineOption time.Time
uniqueOption time.Duration uniqueOption time.Duration
processAtOption time.Time processAtOption time.Time
processInOption time.Duration processInOption time.Duration
retentionOption time.Duration
) )
// MaxRetry returns an option to specify the max number of times // MaxRetry returns an option to specify the max number of times
@@ -93,16 +91,23 @@ func (n retryOption) Type() OptionType { return MaxRetryOpt }
func (n retryOption) Value() interface{} { return int(n) } func (n retryOption) Value() interface{} { return int(n) }
// Queue returns an option to specify the queue to enqueue the task into. // Queue returns an option to specify the queue to enqueue the task into.
//
// Queue name is case-insensitive and the lowercased version is used.
func Queue(qname string) Option { func Queue(qname string) Option {
return queueOption(strings.ToLower(qname)) return queueOption(qname)
} }
func (qname queueOption) String() string { return fmt.Sprintf("Queue(%q)", string(qname)) } func (qname queueOption) String() string { return fmt.Sprintf("Queue(%q)", string(qname)) }
func (qname queueOption) Type() OptionType { return QueueOpt } func (qname queueOption) Type() OptionType { return QueueOpt }
func (qname queueOption) Value() interface{} { return string(qname) } func (qname queueOption) Value() interface{} { return string(qname) }
// TaskID returns an option to specify the task ID.
func TaskID(id string) Option {
return taskIDOption(id)
}
func (id taskIDOption) String() string { return fmt.Sprintf("TaskID(%q)", string(id)) }
func (id taskIDOption) Type() OptionType { return TaskIDOpt }
func (id taskIDOption) Value() interface{} { return string(id) }
// Timeout returns an option to specify how long a task may run. // Timeout returns an option to specify how long a task may run.
// If the timeout elapses before the Handler returns, then the task // If the timeout elapses before the Handler returns, then the task
// will be retried. // will be retried.
@@ -139,6 +144,7 @@ func (t deadlineOption) Value() interface{} { return time.Time(t) }
// Task enqueued with this option is guaranteed to be unique within the given ttl. // Task enqueued with this option is guaranteed to be unique within the given ttl.
// Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued. // Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued.
// ErrDuplicateTask error is returned when enqueueing a duplicate task. // ErrDuplicateTask error is returned when enqueueing a duplicate task.
// TTL duration must be greater than or equal to 1 second.
// //
// Uniqueness of a task is based on the following properties: // Uniqueness of a task is based on the following properties:
// - Task Type // - Task Type
@@ -176,19 +182,36 @@ func (d processInOption) String() string { return fmt.Sprintf("ProcessIn(%v)
func (d processInOption) Type() OptionType { return ProcessInOpt } func (d processInOption) Type() OptionType { return ProcessInOpt }
func (d processInOption) Value() interface{} { return time.Duration(d) } func (d processInOption) Value() interface{} { return time.Duration(d) }
// Retention returns an option to specify the duration of retention period for the task.
// If this option is provided, the task will be stored as a completed task after successful processing.
// A completed task will be deleted after the specified duration elapses.
func Retention(d time.Duration) Option {
return retentionOption(d)
}
func (ttl retentionOption) String() string { return fmt.Sprintf("Retention(%v)", time.Duration(ttl)) }
func (ttl retentionOption) Type() OptionType { return RetentionOpt }
func (ttl retentionOption) Value() interface{} { return time.Duration(ttl) }
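A minimal sketch of enqueueing with a retention period so the task is kept as a completed task for a day after it succeeds; the task type, address, and duration are illustrative:

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	info, err := client.Enqueue(
		asynq.NewTask("report:generate", nil),
		asynq.Retention(24*time.Hour), // keep the completed task (and any written result) for 24h
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("enqueued %s with retention %v", info.ID, info.Retention)
}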
// ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task. // ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
// //
// ErrDuplicateTask error only applies to tasks enqueued with a Unique option. // ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
var ErrDuplicateTask = errors.New("task already exists") var ErrDuplicateTask = errors.New("task already exists")
// ErrTaskIDConflict indicates that the given task could not be enqueued since its task ID already exists.
//
// ErrTaskIDConflict error only applies to tasks enqueued with a TaskID option.
var ErrTaskIDConflict = errors.New("task ID conflicts with another task")
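A sketch of how a caller might distinguish these sentinel errors when using the TaskID and Unique options together; the ID and TTL values are arbitrary:

package main

import (
	"errors"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("order:process", nil)
	_, err := client.Enqueue(task, asynq.TaskID("order-1234"), asynq.Unique(time.Hour))
	switch {
	case errors.Is(err, asynq.ErrTaskIDConflict):
		log.Print("a task with ID order-1234 already exists")
	case errors.Is(err, asynq.ErrDuplicateTask):
		log.Print("an identical task was already enqueued within the uniqueness TTL")
	case err != nil:
		log.Fatal(err)
	}
}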
type option struct { type option struct {
retry int retry int
queue string queue string
taskID string
timeout time.Duration timeout time.Duration
deadline time.Time deadline time.Time
uniqueTTL time.Duration uniqueTTL time.Duration
processAt time.Time processAt time.Time
retention time.Duration
} }
// composeOptions merges user provided options into the default options // composeOptions merges user provided options into the default options
@@ -199,6 +222,7 @@ func composeOptions(opts ...Option) (option, error) {
res := option{ res := option{
retry: defaultMaxRetry, retry: defaultMaxRetry,
queue: base.DefaultQueueName, queue: base.DefaultQueueName,
taskID: uuid.NewString(),
timeout: 0, // do not set to defaultTimeout here timeout: 0, // do not set to defaultTimeout here
deadline: time.Time{}, deadline: time.Time{},
processAt: time.Now(), processAt: time.Now(),
@@ -208,21 +232,33 @@ func composeOptions(opts ...Option) (option, error) {
case retryOption: case retryOption:
res.retry = int(opt) res.retry = int(opt)
case queueOption: case queueOption:
trimmed := strings.TrimSpace(string(opt)) qname := string(opt)
if err := base.ValidateQueueName(trimmed); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return option{}, err return option{}, err
} }
res.queue = trimmed res.queue = qname
case taskIDOption:
id := string(opt)
if err := validateTaskID(id); err != nil {
return option{}, err
}
res.taskID = id
case timeoutOption: case timeoutOption:
res.timeout = time.Duration(opt) res.timeout = time.Duration(opt)
case deadlineOption: case deadlineOption:
res.deadline = time.Time(opt) res.deadline = time.Time(opt)
case uniqueOption: case uniqueOption:
res.uniqueTTL = time.Duration(opt) ttl := time.Duration(opt)
if ttl < 1*time.Second {
return option{}, errors.New("Unique TTL cannot be less than 1s")
}
res.uniqueTTL = ttl
case processAtOption: case processAtOption:
res.processAt = time.Time(opt) res.processAt = time.Time(opt)
case processInOption: case processInOption:
res.processAt = time.Now().Add(time.Duration(opt)) res.processAt = time.Now().Add(time.Duration(opt))
case retentionOption:
res.retention = time.Duration(opt)
default: default:
// ignore unexpected option // ignore unexpected option
} }
@@ -230,6 +266,14 @@ func composeOptions(opts ...Option) (option, error) {
return res, nil return res, nil
} }
// validates user provided task ID string.
func validateTaskID(id string) error {
if strings.TrimSpace(id) == "" {
return errors.New("task ID cannot be empty")
}
return nil
}
const ( const (
// Default max retry count used if nothing is specified. // Default max retry count used if nothing is specified.
defaultMaxRetry = 25 defaultMaxRetry = 25
@@ -244,71 +288,45 @@ var (
noDeadline time.Time = time.Unix(0, 0) noDeadline time.Time = time.Unix(0, 0)
) )
// SetDefaultOptions sets options to be used for a given task type.
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
//
// Default options can be overridden by options passed at enqueue time.
func (c *Client) SetDefaultOptions(taskType string, opts ...Option) {
c.mu.Lock()
defer c.mu.Unlock()
c.opts[taskType] = opts
}
// A Result holds enqueued task's metadata.
type Result struct {
// ID is a unique identifier for the task.
ID string
// EnqueuedAt is the time the task was enqueued in UTC.
EnqueuedAt time.Time
// ProcessAt indicates when the task should be processed.
ProcessAt time.Time
// Retry is the maximum number of retry for the task.
Retry int
// Queue is a name of the queue the task is enqueued to.
Queue string
// Timeout is the timeout value for the task.
// Counting for timeout starts when a worker starts processing the task.
// If task processing doesn't complete within the timeout, the task will be retried.
// The value zero means no timeout.
//
// If deadline is set, min(now+timeout, deadline) is used, where the now is the time when
// a worker starts processing the task.
Timeout time.Duration
// Deadline is the deadline value for the task.
// If task processing doesn't complete before the deadline, the task will be retried.
// The value time.Unix(0, 0) means no deadline.
//
// If timeout is set, min(now+timeout, deadline) is used, where the now is the time when
// a worker starts processing the task.
Deadline time.Time
}
// Close closes the connection with redis. // Close closes the connection with redis.
func (c *Client) Close() error { func (c *Client) Close() error {
return c.rdb.Close() return c.rdb.Close()
} }
// Enqueue enqueues the given task to be processed asynchronously. // Enqueue enqueues the given task to a queue.
// //
// Enqueue returns nil if the task is enqueued successfully, otherwise returns a non-nil error. // Enqueue returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
// //
// The argument opts specifies the behavior of task processing. // The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others. // If there are conflicting Option values the last one overrides others.
// Any options provided to NewTask can be overridden by options passed to Enqueue.
// By default, max retry is set to 25 and timeout is set to 30 minutes. // By default, max retry is set to 25 and timeout is set to 30 minutes.
// If no ProcessAt or ProcessIn options are passed, the task will be processed immediately. //
func (c *Client) Enqueue(task *Task, opts ...Option) (*Result, error) { // If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
c.mu.Lock() //
if defaults, ok := c.opts[task.Type]; ok { // Enqueue uses context.Background internally; to specify the context, use EnqueueContext.
opts = append(defaults, opts...) func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
return c.EnqueueContext(context.Background(), task, opts...)
}
// EnqueueContext enqueues the given task to a queue.
//
// EnqueueContext returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
// Any options provided to NewTask can be overridden by options passed to Enqueue.
// By default, max retry is set to 25 and timeout is set to 30 minutes.
//
// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
//
// The first argument context applies to the enqueue operation. To specify task timeout and deadline, use Timeout and Deadline option instead.
func (c *Client) EnqueueContext(ctx context.Context, task *Task, opts ...Option) (*TaskInfo, error) {
if strings.TrimSpace(task.Type()) == "" {
return nil, fmt.Errorf("task typename cannot be empty")
} }
c.mu.Unlock() // merge task options with the options provided at enqueue time.
opts = append(task.opts, opts...)
opt, err := composeOptions(opts...) opt, err := composeOptions(opts...)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -327,53 +345,51 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*Result, error) {
} }
var uniqueKey string var uniqueKey string
if opt.uniqueTTL > 0 { if opt.uniqueTTL > 0 {
uniqueKey = base.UniqueKey(opt.queue, task.Type, task.Payload.data) uniqueKey = base.UniqueKey(opt.queue, task.Type(), task.Payload())
} }
msg := &base.TaskMessage{ msg := &base.TaskMessage{
ID: uuid.New(), ID: opt.taskID,
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Queue: opt.queue, Queue: opt.queue,
Retry: opt.retry, Retry: opt.retry,
Deadline: deadline.Unix(), Deadline: deadline.Unix(),
Timeout: int64(timeout.Seconds()), Timeout: int64(timeout.Seconds()),
UniqueKey: uniqueKey, UniqueKey: uniqueKey,
Retention: int64(opt.retention.Seconds()),
} }
now := time.Now() now := time.Now()
var state base.TaskState
if opt.processAt.Before(now) || opt.processAt.Equal(now) { if opt.processAt.Before(now) || opt.processAt.Equal(now) {
opt.processAt = now opt.processAt = now
err = c.enqueue(msg, opt.uniqueTTL) err = c.enqueue(ctx, msg, opt.uniqueTTL)
state = base.TaskStatePending
} else { } else {
err = c.schedule(msg, opt.processAt, opt.uniqueTTL) err = c.schedule(ctx, msg, opt.processAt, opt.uniqueTTL)
state = base.TaskStateScheduled
} }
switch { switch {
case err == rdb.ErrDuplicateTask: case errors.Is(err, errors.ErrDuplicateTask):
return nil, fmt.Errorf("%w", ErrDuplicateTask) return nil, fmt.Errorf("%w", ErrDuplicateTask)
case errors.Is(err, errors.ErrTaskIdConflict):
return nil, fmt.Errorf("%w", ErrTaskIDConflict)
case err != nil: case err != nil:
return nil, err return nil, err
} }
return &Result{ return newTaskInfo(msg, state, opt.processAt, nil), nil
ID: msg.ID.String(),
EnqueuedAt: time.Now().UTC(),
ProcessAt: opt.processAt,
Queue: msg.Queue,
Retry: msg.Retry,
Timeout: timeout,
Deadline: deadline,
}, nil
} }
func (c *Client) enqueue(msg *base.TaskMessage, uniqueTTL time.Duration) error { func (c *Client) enqueue(ctx context.Context, msg *base.TaskMessage, uniqueTTL time.Duration) error {
if uniqueTTL > 0 { if uniqueTTL > 0 {
return c.rdb.EnqueueUnique(msg, uniqueTTL) return c.rdb.EnqueueUnique(ctx, msg, uniqueTTL)
} }
return c.rdb.Enqueue(msg) return c.rdb.Enqueue(ctx, msg)
} }
func (c *Client) schedule(msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error { func (c *Client) schedule(ctx context.Context, msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
if uniqueTTL > 0 { if uniqueTTL > 0 {
ttl := t.Add(uniqueTTL).Sub(time.Now()) ttl := t.Add(uniqueTTL).Sub(time.Now())
return c.rdb.ScheduleUnique(msg, t, ttl) return c.rdb.ScheduleUnique(ctx, msg, t, ttl)
} }
return c.rdb.Schedule(msg, t) return c.rdb.Schedule(ctx, msg, t)
} }
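A sketch of bounding the enqueue operation itself with EnqueueContext; per the doc comment above, the context limits only the Redis round trip, while task processing limits come from the Timeout and Deadline options. The address and task type are illustrative:

package main

import (
	"context"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Give the enqueue call at most 2 seconds to talk to Redis.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	info, err := client.EnqueueContext(ctx, asynq.NewTask("email:welcome", nil))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("enqueued %s to %q", info.ID, info.Queue)
}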

View File

@@ -5,6 +5,7 @@
package asynq package asynq
import ( import (
"context"
"errors" "errors"
"testing" "testing"
"time" "time"
@@ -20,7 +21,7 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
client := NewClient(getRedisConnOpt(t)) client := NewClient(getRedisConnOpt(t))
defer client.Close() defer client.Close()
task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"}) task := NewTask("send_email", h.JSON(map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"}))
var ( var (
now = time.Now() now = time.Now()
@@ -32,7 +33,7 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
task *Task task *Task
processAt time.Time // value for ProcessAt option processAt time.Time // value for ProcessAt option
opts []Option // other options opts []Option // other options
wantRes *Result wantInfo *TaskInfo
wantPending map[string][]*base.TaskMessage wantPending map[string][]*base.TaskMessage
wantScheduled map[string][]base.Z wantScheduled map[string][]base.Z
}{ }{
@@ -41,19 +42,24 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
task: task, task: task,
processAt: now, processAt: now,
opts: []Option{}, opts: []Option{},
wantRes: &Result{ wantInfo: &TaskInfo{
EnqueuedAt: now.UTC(), Queue: "default",
ProcessAt: now, Type: task.Type(),
Queue: "default", Payload: task.Payload(),
Retry: defaultMaxRetry, State: TaskStatePending,
Timeout: defaultTimeout, MaxRetry: defaultMaxRetry,
Deadline: noDeadline, Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": { "default": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: defaultMaxRetry, Retry: defaultMaxRetry,
Queue: "default", Queue: "default",
Timeout: int64(defaultTimeout.Seconds()), Timeout: int64(defaultTimeout.Seconds()),
@@ -70,13 +76,18 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
task: task, task: task,
processAt: oneHourLater, processAt: oneHourLater,
opts: []Option{}, opts: []Option{},
wantRes: &Result{ wantInfo: &TaskInfo{
EnqueuedAt: now.UTC(), Queue: "default",
ProcessAt: oneHourLater, Type: task.Type(),
Queue: "default", Payload: task.Payload(),
Retry: defaultMaxRetry, State: TaskStateScheduled,
Timeout: defaultTimeout, MaxRetry: defaultMaxRetry,
Deadline: noDeadline, Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: oneHourLater,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": {}, "default": {},
@@ -85,8 +96,8 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
"default": { "default": {
{ {
Message: &base.TaskMessage{ Message: &base.TaskMessage{
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: defaultMaxRetry, Retry: defaultMaxRetry,
Queue: "default", Queue: "default",
Timeout: int64(defaultTimeout.Seconds()), Timeout: int64(defaultTimeout.Seconds()),
@@ -103,24 +114,24 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
h.FlushDB(t, r) // clean up db before each test case. h.FlushDB(t, r) // clean up db before each test case.
opts := append(tc.opts, ProcessAt(tc.processAt)) opts := append(tc.opts, ProcessAt(tc.processAt))
gotRes, err := client.Enqueue(tc.task, opts...) gotInfo, err := client.Enqueue(tc.task, opts...)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
continue continue
} }
cmpOptions := []cmp.Option{ cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(Result{}, "ID"), cmpopts.IgnoreFields(TaskInfo{}, "ID"),
cmpopts.EquateApproxTime(500 * time.Millisecond), cmpopts.EquateApproxTime(500 * time.Millisecond),
} }
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" { if diff := cmp.Diff(tc.wantInfo, gotInfo, cmpOptions...); diff != "" {
t.Errorf("%s;\nEnqueue(task, ProcessAt(%v)) returned %v, want %v; (-want,+got)\n%s", t.Errorf("%s;\nEnqueue(task, ProcessAt(%v)) returned %v, want %v; (-want,+got)\n%s",
tc.desc, tc.processAt, gotRes, tc.wantRes, diff) tc.desc, tc.processAt, gotInfo, tc.wantInfo, diff)
} }
for qname, want := range tc.wantPending { for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname) gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" { if diff := cmp.Diff(want, gotPending, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff) t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
} }
} }
for qname, want := range tc.wantScheduled { for qname, want := range tc.wantScheduled {
@@ -137,14 +148,14 @@ func TestClientEnqueue(t *testing.T) {
client := NewClient(getRedisConnOpt(t)) client := NewClient(getRedisConnOpt(t))
defer client.Close() defer client.Close()
task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"}) task := NewTask("send_email", h.JSON(map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"}))
now := time.Now() now := time.Now()
tests := []struct { tests := []struct {
desc string desc string
task *Task task *Task
opts []Option opts []Option
wantRes *Result wantInfo *TaskInfo
wantPending map[string][]*base.TaskMessage wantPending map[string][]*base.TaskMessage
}{ }{
{ {
@@ -153,18 +164,24 @@ func TestClientEnqueue(t *testing.T) {
opts: []Option{ opts: []Option{
MaxRetry(3), MaxRetry(3),
}, },
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now, Queue: "default",
Queue: "default", Type: task.Type(),
Retry: 3, Payload: task.Payload(),
Timeout: defaultTimeout, State: TaskStatePending,
Deadline: noDeadline, MaxRetry: 3,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": { "default": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: 3, Retry: 3,
Queue: "default", Queue: "default",
Timeout: int64(defaultTimeout.Seconds()), Timeout: int64(defaultTimeout.Seconds()),
@@ -179,18 +196,24 @@ func TestClientEnqueue(t *testing.T) {
opts: []Option{ opts: []Option{
MaxRetry(-2), MaxRetry(-2),
}, },
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now, Queue: "default",
Queue: "default", Type: task.Type(),
Retry: 0, Payload: task.Payload(),
Timeout: defaultTimeout, State: TaskStatePending,
Deadline: noDeadline, MaxRetry: 0, // Retry count should be set to zero
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": { "default": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: 0, // Retry count should be set to zero Retry: 0, // Retry count should be set to zero
Queue: "default", Queue: "default",
Timeout: int64(defaultTimeout.Seconds()), Timeout: int64(defaultTimeout.Seconds()),
@@ -206,18 +229,24 @@ func TestClientEnqueue(t *testing.T) {
MaxRetry(2), MaxRetry(2),
MaxRetry(10), MaxRetry(10),
}, },
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now, Queue: "default",
Queue: "default", Type: task.Type(),
Retry: 10, Payload: task.Payload(),
Timeout: defaultTimeout, State: TaskStatePending,
Deadline: noDeadline, MaxRetry: 10, // Last option takes precedence
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": { "default": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: 10, // Last option takes precedence Retry: 10, // Last option takes precedence
Queue: "default", Queue: "default",
Timeout: int64(defaultTimeout.Seconds()), Timeout: int64(defaultTimeout.Seconds()),
@@ -232,18 +261,24 @@ func TestClientEnqueue(t *testing.T) {
opts: []Option{ opts: []Option{
Queue("custom"), Queue("custom"),
}, },
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now, Queue: "custom",
Queue: "custom", Type: task.Type(),
Retry: defaultMaxRetry, Payload: task.Payload(),
Timeout: defaultTimeout, State: TaskStatePending,
Deadline: noDeadline, MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"custom": { "custom": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: defaultMaxRetry, Retry: defaultMaxRetry,
Queue: "custom", Queue: "custom",
Timeout: int64(defaultTimeout.Seconds()), Timeout: int64(defaultTimeout.Seconds()),
@@ -253,25 +288,31 @@ func TestClientEnqueue(t *testing.T) {
}, },
}, },
{ {
desc: "Queue option should be case-insensitive", desc: "Queue option should be case sensitive",
task: task, task: task,
opts: []Option{ opts: []Option{
Queue("HIGH"), Queue("MyQueue"),
}, },
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now, Queue: "MyQueue",
Queue: "high", Type: task.Type(),
Retry: defaultMaxRetry, Payload: task.Payload(),
Timeout: defaultTimeout, State: TaskStatePending,
Deadline: noDeadline, MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"high": { "MyQueue": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: defaultMaxRetry, Retry: defaultMaxRetry,
Queue: "high", Queue: "MyQueue",
Timeout: int64(defaultTimeout.Seconds()), Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(), Deadline: noDeadline.Unix(),
}, },
@@ -284,18 +325,24 @@ func TestClientEnqueue(t *testing.T) {
opts: []Option{ opts: []Option{
Timeout(20 * time.Second), Timeout(20 * time.Second),
}, },
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now, Queue: "default",
Queue: "default", Type: task.Type(),
Retry: defaultMaxRetry, Payload: task.Payload(),
Timeout: 20 * time.Second, State: TaskStatePending,
Deadline: noDeadline, MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: 20 * time.Second,
Deadline: time.Time{},
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": { "default": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: defaultMaxRetry, Retry: defaultMaxRetry,
Queue: "default", Queue: "default",
Timeout: 20, Timeout: 20,
@@ -310,18 +357,24 @@ func TestClientEnqueue(t *testing.T) {
opts: []Option{ opts: []Option{
Deadline(time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC)), Deadline(time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC)),
}, },
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now, Queue: "default",
Queue: "default", Type: task.Type(),
Retry: defaultMaxRetry, Payload: task.Payload(),
Timeout: noTimeout, State: TaskStatePending,
Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC), MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: noTimeout,
Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC),
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": { "default": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: defaultMaxRetry, Retry: defaultMaxRetry,
Queue: "default", Queue: "default",
Timeout: int64(noTimeout.Seconds()), Timeout: int64(noTimeout.Seconds()),
@@ -337,18 +390,24 @@ func TestClientEnqueue(t *testing.T) {
Timeout(20 * time.Second), Timeout(20 * time.Second),
Deadline(time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC)), Deadline(time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC)),
}, },
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now, Queue: "default",
Queue: "default", Type: task.Type(),
Retry: defaultMaxRetry, Payload: task.Payload(),
Timeout: 20 * time.Second, State: TaskStatePending,
Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC), MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: 20 * time.Second,
Deadline: time.Date(2020, time.June, 24, 0, 0, 0, 0, time.UTC),
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": { "default": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: defaultMaxRetry, Retry: defaultMaxRetry,
Queue: "default", Queue: "default",
Timeout: 20, Timeout: 20,
@@ -357,40 +416,168 @@ func TestClientEnqueue(t *testing.T) {
}, },
}, },
}, },
{
desc: "With Retention option",
task: task,
opts: []Option{
Retention(24 * time.Hour),
},
wantInfo: &TaskInfo{
Queue: "default",
Type: task.Type(),
Payload: task.Payload(),
State: TaskStatePending,
MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
Retention: 24 * time.Hour,
},
wantPending: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type(),
Payload: task.Payload(),
Retry: defaultMaxRetry,
Queue: "default",
Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(),
Retention: int64((24 * time.Hour).Seconds()),
},
},
},
},
} }
for _, tc := range tests { for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case. h.FlushDB(t, r) // clean up db before each test case.
gotRes, err := client.Enqueue(tc.task, tc.opts...) gotInfo, err := client.Enqueue(tc.task, tc.opts...)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
continue continue
} }
cmpOptions := []cmp.Option{ cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"), cmpopts.IgnoreFields(TaskInfo{}, "ID"),
cmpopts.EquateApproxTime(500 * time.Millisecond), cmpopts.EquateApproxTime(500 * time.Millisecond),
} }
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" { if diff := cmp.Diff(tc.wantInfo, gotInfo, cmpOptions...); diff != "" {
t.Errorf("%s;\nEnqueue(task) returned %v, want %v; (-want,+got)\n%s", t.Errorf("%s;\nEnqueue(task) returned %v, want %v; (-want,+got)\n%s",
tc.desc, gotRes, tc.wantRes, diff) tc.desc, gotInfo, tc.wantInfo, diff)
} }
for qname, want := range tc.wantPending { for qname, want := range tc.wantPending {
got := h.GetPendingMessages(t, r, qname) got := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, got, h.IgnoreIDOpt); diff != "" { if diff := cmp.Diff(want, got, h.IgnoreIDOpt); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff) t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
} }
} }
} }
} }
func TestClientEnqueueWithTaskIDOption(t *testing.T) {
r := setup(t)
client := NewClient(getRedisConnOpt(t))
defer client.Close()
task := NewTask("send_email", nil)
now := time.Now()
tests := []struct {
desc string
task *Task
opts []Option
wantInfo *TaskInfo
wantPending map[string][]*base.TaskMessage
}{
{
desc: "With a valid TaskID option",
task: task,
opts: []Option{
TaskID("custom_id"),
},
wantInfo: &TaskInfo{
ID: "custom_id",
Queue: "default",
Type: task.Type(),
Payload: task.Payload(),
State: TaskStatePending,
MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
},
wantPending: map[string][]*base.TaskMessage{
"default": {
{
ID: "custom_id",
Type: task.Type(),
Payload: task.Payload(),
Retry: defaultMaxRetry,
Queue: "default",
Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(),
},
},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
gotInfo, err := client.Enqueue(tc.task, tc.opts...)
if err != nil {
t.Errorf("got non-nil error %v, want nil", err)
continue
}
cmpOptions := []cmp.Option{
cmpopts.EquateApproxTime(500 * time.Millisecond),
}
if diff := cmp.Diff(tc.wantInfo, gotInfo, cmpOptions...); diff != "" {
t.Errorf("%s;\nEnqueue(task) returned %v, want %v; (-want,+got)\n%s",
tc.desc, gotInfo, tc.wantInfo, diff)
}
for qname, want := range tc.wantPending {
got := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
}
}
}
}
func TestClientEnqueueWithConflictingTaskID(t *testing.T) {
setup(t)
client := NewClient(getRedisConnOpt(t))
defer client.Close()
const taskID = "custom_id"
task := NewTask("foo", nil)
if _, err := client.Enqueue(task, TaskID(taskID)); err != nil {
t.Fatalf("First task: Enqueue failed: %v", err)
}
_, err := client.Enqueue(task, TaskID(taskID))
if !errors.Is(err, ErrTaskIDConflict) {
t.Errorf("Second task: Enqueue returned %v, want %v", err, ErrTaskIDConflict)
}
}
func TestClientEnqueueWithProcessInOption(t *testing.T) { func TestClientEnqueueWithProcessInOption(t *testing.T) {
r := setup(t) r := setup(t)
client := NewClient(getRedisConnOpt(t)) client := NewClient(getRedisConnOpt(t))
defer client.Close() defer client.Close()
task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"}) task := NewTask("send_email", h.JSON(map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"}))
now := time.Now() now := time.Now()
tests := []struct { tests := []struct {
@@ -398,7 +585,7 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
task *Task task *Task
delay time.Duration // value for ProcessIn option delay time.Duration // value for ProcessIn option
opts []Option // other options opts []Option // other options
wantRes *Result wantInfo *TaskInfo
wantPending map[string][]*base.TaskMessage wantPending map[string][]*base.TaskMessage
wantScheduled map[string][]base.Z wantScheduled map[string][]base.Z
}{ }{
@@ -407,12 +594,18 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
task: task, task: task,
delay: 1 * time.Hour, delay: 1 * time.Hour,
opts: []Option{}, opts: []Option{},
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now.Add(1 * time.Hour), Queue: "default",
Queue: "default", Type: task.Type(),
Retry: defaultMaxRetry, Payload: task.Payload(),
Timeout: defaultTimeout, State: TaskStateScheduled,
Deadline: noDeadline, MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: time.Now().Add(1 * time.Hour),
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": {}, "default": {},
@@ -421,8 +614,8 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
"default": { "default": {
{ {
Message: &base.TaskMessage{ Message: &base.TaskMessage{
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: defaultMaxRetry, Retry: defaultMaxRetry,
Queue: "default", Queue: "default",
Timeout: int64(defaultTimeout.Seconds()), Timeout: int64(defaultTimeout.Seconds()),
@@ -438,18 +631,24 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
task: task, task: task,
delay: 0, delay: 0,
opts: []Option{}, opts: []Option{},
wantRes: &Result{ wantInfo: &TaskInfo{
ProcessAt: now, Queue: "default",
Queue: "default", Type: task.Type(),
Retry: defaultMaxRetry, Payload: task.Payload(),
Timeout: defaultTimeout, State: TaskStatePending,
Deadline: noDeadline, MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
wantPending: map[string][]*base.TaskMessage{ wantPending: map[string][]*base.TaskMessage{
"default": { "default": {
{ {
Type: task.Type, Type: task.Type(),
Payload: task.Payload.data, Payload: task.Payload(),
Retry: defaultMaxRetry, Retry: defaultMaxRetry,
Queue: "default", Queue: "default",
Timeout: int64(defaultTimeout.Seconds()), Timeout: int64(defaultTimeout.Seconds()),
@@ -467,24 +666,24 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
h.FlushDB(t, r) // clean up db before each test case. h.FlushDB(t, r) // clean up db before each test case.
opts := append(tc.opts, ProcessIn(tc.delay)) opts := append(tc.opts, ProcessIn(tc.delay))
gotRes, err := client.Enqueue(tc.task, opts...) gotInfo, err := client.Enqueue(tc.task, opts...)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
continue continue
} }
cmpOptions := []cmp.Option{ cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"), cmpopts.IgnoreFields(TaskInfo{}, "ID"),
cmpopts.EquateApproxTime(500 * time.Millisecond), cmpopts.EquateApproxTime(500 * time.Millisecond),
} }
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" { if diff := cmp.Diff(tc.wantInfo, gotInfo, cmpOptions...); diff != "" {
t.Errorf("%s;\nEnqueue(task, ProcessIn(%v)) returned %v, want %v; (-want,+got)\n%s", t.Errorf("%s;\nEnqueue(task, ProcessIn(%v)) returned %v, want %v; (-want,+got)\n%s",
tc.desc, tc.delay, gotRes, tc.wantRes, diff) tc.desc, tc.delay, gotInfo, tc.wantInfo, diff)
} }
for qname, want := range tc.wantPending { for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname) gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" { if diff := cmp.Diff(want, gotPending, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff) t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
} }
} }
for qname, want := range tc.wantScheduled { for qname, want := range tc.wantScheduled {
@@ -501,7 +700,7 @@ func TestClientEnqueueError(t *testing.T) {
client := NewClient(getRedisConnOpt(t)) client := NewClient(getRedisConnOpt(t))
defer client.Close() defer client.Close()
task := NewTask("send_email", map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"}) task := NewTask("send_email", h.JSON(map[string]interface{}{"to": "customer@gmail.com", "from": "merchant@example.com"}))
tests := []struct { tests := []struct {
desc string desc string
@@ -515,6 +714,31 @@ func TestClientEnqueueError(t *testing.T) {
Queue(""), Queue(""),
}, },
}, },
{
desc: "With empty task typename",
task: NewTask("", h.JSON(map[string]interface{}{})),
opts: []Option{},
},
{
desc: "With blank task typename",
task: NewTask(" ", h.JSON(map[string]interface{}{})),
opts: []Option{},
},
{
desc: "With empty task ID",
task: NewTask("foo", nil),
opts: []Option{TaskID("")},
},
{
desc: "With blank task ID",
task: NewTask("foo", nil),
opts: []Option{TaskID(" ")},
},
{
desc: "With unique option less than 1s",
task: NewTask("foo", nil),
opts: []Option{Unique(300 * time.Millisecond)},
},
} }
for _, tc := range tests { for _, tc := range tests {
@@ -527,17 +751,18 @@ func TestClientEnqueueError(t *testing.T) {
} }
} }
func TestClientDefaultOptions(t *testing.T) { func TestClientWithDefaultOptions(t *testing.T) {
r := setup(t) r := setup(t)
now := time.Now() now := time.Now()
tests := []struct { tests := []struct {
desc string desc string
defaultOpts []Option // options set at the client level. defaultOpts []Option // options set at task initialization time
opts []Option // options used at enqueue time. opts []Option // options used at enqueue time.
task *Task tasktype string
wantRes *Result payload []byte
wantInfo *TaskInfo
queue string // queue that the message should go into. queue string // queue that the message should go into.
want *base.TaskMessage want *base.TaskMessage
}{ }{
@@ -545,13 +770,20 @@ func TestClientDefaultOptions(t *testing.T) {
desc: "With queue routing option", desc: "With queue routing option",
defaultOpts: []Option{Queue("feed")}, defaultOpts: []Option{Queue("feed")},
opts: []Option{}, opts: []Option{},
task: NewTask("feed:import", nil), tasktype: "feed:import",
wantRes: &Result{ payload: nil,
ProcessAt: now, wantInfo: &TaskInfo{
Queue: "feed", Queue: "feed",
Retry: defaultMaxRetry, Type: "feed:import",
Timeout: defaultTimeout, Payload: nil,
Deadline: noDeadline, State: TaskStatePending,
MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
queue: "feed", queue: "feed",
want: &base.TaskMessage{ want: &base.TaskMessage{
@@ -567,13 +799,20 @@ func TestClientDefaultOptions(t *testing.T) {
desc: "With multiple options", desc: "With multiple options",
defaultOpts: []Option{Queue("feed"), MaxRetry(5)}, defaultOpts: []Option{Queue("feed"), MaxRetry(5)},
opts: []Option{}, opts: []Option{},
task: NewTask("feed:import", nil), tasktype: "feed:import",
wantRes: &Result{ payload: nil,
ProcessAt: now, wantInfo: &TaskInfo{
Queue: "feed", Queue: "feed",
Retry: 5, Type: "feed:import",
Timeout: defaultTimeout, Payload: nil,
Deadline: noDeadline, State: TaskStatePending,
MaxRetry: 5,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
queue: "feed", queue: "feed",
want: &base.TaskMessage{ want: &base.TaskMessage{
@@ -589,13 +828,19 @@ func TestClientDefaultOptions(t *testing.T) {
desc: "With overriding options at enqueue time", desc: "With overriding options at enqueue time",
defaultOpts: []Option{Queue("feed"), MaxRetry(5)}, defaultOpts: []Option{Queue("feed"), MaxRetry(5)},
opts: []Option{Queue("critical")}, opts: []Option{Queue("critical")},
task: NewTask("feed:import", nil), tasktype: "feed:import",
wantRes: &Result{ payload: nil,
ProcessAt: now, wantInfo: &TaskInfo{
Queue: "critical", Queue: "critical",
Retry: 5, Type: "feed:import",
Timeout: defaultTimeout, Payload: nil,
Deadline: noDeadline, State: TaskStatePending,
MaxRetry: 5,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
}, },
queue: "critical", queue: "critical",
want: &base.TaskMessage{ want: &base.TaskMessage{
@@ -613,18 +858,18 @@ func TestClientDefaultOptions(t *testing.T) {
h.FlushDB(t, r) h.FlushDB(t, r)
c := NewClient(getRedisConnOpt(t)) c := NewClient(getRedisConnOpt(t))
defer c.Close() defer c.Close()
c.SetDefaultOptions(tc.task.Type, tc.defaultOpts...) task := NewTask(tc.tasktype, tc.payload, tc.defaultOpts...)
gotRes, err := c.Enqueue(tc.task, tc.opts...) gotInfo, err := c.Enqueue(task, tc.opts...)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
cmpOptions := []cmp.Option{ cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"), cmpopts.IgnoreFields(TaskInfo{}, "ID"),
cmpopts.EquateApproxTime(500 * time.Millisecond), cmpopts.EquateApproxTime(500 * time.Millisecond),
} }
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" { if diff := cmp.Diff(tc.wantInfo, gotInfo, cmpOptions...); diff != "" {
t.Errorf("%s;\nEnqueue(task, opts...) returned %v, want %v; (-want,+got)\n%s", t.Errorf("%s;\nEnqueue(task, opts...) returned %v, want %v; (-want,+got)\n%s",
tc.desc, gotRes, tc.wantRes, diff) tc.desc, gotInfo, tc.wantInfo, diff)
} }
pending := h.GetPendingMessages(t, r, tc.queue) pending := h.GetPendingMessages(t, r, tc.queue)
if len(pending) != 1 { if len(pending) != 1 {
@@ -650,7 +895,7 @@ func TestClientEnqueueUnique(t *testing.T) {
ttl time.Duration ttl time.Duration
}{ }{
{ {
NewTask("email", map[string]interface{}{"user_id": 123}), NewTask("email", h.JSON(map[string]interface{}{"user_id": 123})),
time.Hour, time.Hour,
}, },
} }
@@ -664,7 +909,7 @@ func TestClientEnqueueUnique(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type, tc.task.Payload.data)).Val() gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) { if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL = %v, want %v", gotTTL, tc.ttl) t.Errorf("TTL = %v, want %v", gotTTL, tc.ttl)
continue continue
@@ -709,7 +954,7 @@ func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type, tc.task.Payload.data)).Val() gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
wantTTL := time.Duration(tc.ttl.Seconds()+tc.d.Seconds()) * time.Second wantTTL := time.Duration(tc.ttl.Seconds()+tc.d.Seconds()) * time.Second
if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) { if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL = %v, want %v", gotTTL, wantTTL) t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
@@ -755,7 +1000,7 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type, tc.task.Payload.data)).Val() gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
wantTTL := tc.at.Add(tc.ttl).Sub(time.Now()) wantTTL := tc.at.Add(tc.ttl).Sub(time.Now())
if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) { if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
t.Errorf("TTL = %v, want %v", gotTTL, wantTTL) t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
@@ -774,4 +1019,3 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
} }
} }
} }
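
The three unique-enqueue tests above pin the TTL that the client sets on base.UniqueKey. For reference, uniqueness is requested from the caller side with the Unique option; a brief sketch (the task type and payload handling are illustrative only):

import (
    "time"

    "github.com/hibiken/asynq"
)

func enqueueUniqueEmail(client *asynq.Client, payload []byte) error {
    task := asynq.NewTask("email", payload)
    // Enqueueing an identical task again within the hour returns an error
    // instead of creating a duplicate entry in the queue.
    _, err := client.Enqueue(task, asynq.Unique(time.Hour))
    return err
}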


@@ -6,49 +6,16 @@ package asynq
import ( import (
"context" "context"
"time"
"github.com/hibiken/asynq/internal/base" asynqcontext "github.com/hibiken/asynq/internal/context"
) )
// A taskMetadata holds task scoped data to put in context.
type taskMetadata struct {
id string
maxRetry int
retryCount int
qname string
}
// ctxKey type is unexported to prevent collisions with context keys defined in
// other packages.
type ctxKey int
// metadataCtxKey is the context key for the task metadata.
// Its value of zero is arbitrary.
const metadataCtxKey ctxKey = 0
// createContext returns a context and cancel function for a given task message.
func createContext(msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
metadata := taskMetadata{
id: msg.ID.String(),
maxRetry: msg.Retry,
retryCount: msg.Retried,
qname: msg.Queue,
}
ctx := context.WithValue(context.Background(), metadataCtxKey, metadata)
return context.WithDeadline(ctx, deadline)
}
// GetTaskID extracts a task ID from a context, if any. // GetTaskID extracts a task ID from a context, if any.
// //
// ID of a task is guaranteed to be unique. // ID of a task is guaranteed to be unique.
// ID of a task doesn't change if the task is being retried. // ID of a task doesn't change if the task is being retried.
func GetTaskID(ctx context.Context) (id string, ok bool) { func GetTaskID(ctx context.Context) (id string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata) return asynqcontext.GetTaskID(ctx)
if !ok {
return "", false
}
return metadata.id, true
} }
// GetRetryCount extracts retry count from a context, if any. // GetRetryCount extracts retry count from a context, if any.
@@ -56,11 +23,7 @@ func GetTaskID(ctx context.Context) (id string, ok bool) {
// Return value n indicates the number of times associated task has been // Return value n indicates the number of times associated task has been
// retried so far. // retried so far.
func GetRetryCount(ctx context.Context) (n int, ok bool) { func GetRetryCount(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata) return asynqcontext.GetRetryCount(ctx)
if !ok {
return 0, false
}
return metadata.retryCount, true
} }
// GetMaxRetry extracts maximum retry from a context, if any. // GetMaxRetry extracts maximum retry from a context, if any.
@@ -68,20 +31,12 @@ func GetRetryCount(ctx context.Context) (n int, ok bool) {
// Return value n indicates the maximum number of times the associated task // Return value n indicates the maximum number of times the associated task
// can be retried if ProcessTask returns a non-nil error. // can be retried if ProcessTask returns a non-nil error.
func GetMaxRetry(ctx context.Context) (n int, ok bool) { func GetMaxRetry(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata) return asynqcontext.GetMaxRetry(ctx)
if !ok {
return 0, false
}
return metadata.maxRetry, true
} }
// GetQueueName extracts queue name from a context, if any. // GetQueueName extracts queue name from a context, if any.
// //
// Return value qname indicates which queue the task was pulled from. // Return value qname indicates which queue the task was pulled from.
func GetQueueName(ctx context.Context) (qname string, ok bool) { func GetQueueName(ctx context.Context) (qname string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata) return asynqcontext.GetQueueName(ctx)
if !ok {
return "", false
}
return metadata.qname, true
} }
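
The four context getters above keep their exported signatures while delegating to the new internal context package, so existing handler code is unaffected. A minimal sketch of how a handler typically reads this metadata (the handler itself and its log format are illustrative, not part of this change):

import (
    "context"
    "log"

    "github.com/hibiken/asynq"
)

// loggingHandler is a hypothetical handler; only the asynq.Get* calls come from the API above.
func loggingHandler(ctx context.Context, t *asynq.Task) error {
    id, _ := asynq.GetTaskID(ctx)          // unique, stable across retries
    retried, _ := asynq.GetRetryCount(ctx) // retries attempted so far
    maxRetry, _ := asynq.GetMaxRetry(ctx)  // configured retry limit
    qname, _ := asynq.GetQueueName(ctx)    // queue the task was pulled from
    log.Printf("task %s from queue %q: attempt %d of %d", id, qname, retried+1, maxRetry+1)
    // ... process t.Payload() here ...
    return nil
}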

doc.go

@@ -11,7 +11,7 @@ specify the connection using one of RedisConnOpt types.
redisConnOpt = asynq.RedisClientOpt{ redisConnOpt = asynq.RedisClientOpt{
Addr: "127.0.0.1:6379", Addr: "127.0.0.1:6379",
Password: "xxxxx", Password: "xxxxx",
DB: 3, DB: 2,
} }
The Client is used to enqueue a task. The Client is used to enqueue a task.
@@ -20,15 +20,19 @@ The Client is used to enqueue a task.
client := asynq.NewClient(redisConnOpt) client := asynq.NewClient(redisConnOpt)
// Task is created with two parameters: its type and payload. // Task is created with two parameters: its type and payload.
t := asynq.NewTask( // Payload data is simply an array of bytes. It can be encoded in JSON, Protocol Buffer, Gob, etc.
"send_email", b, err := json.Marshal(ExamplePayload{UserID: 42})
map[string]interface{}{"user_id": 42}) if err != nil {
log.Fatal(err)
}
task := asynq.NewTask("example", b)
// Enqueue the task to be processed immediately. // Enqueue the task to be processed immediately.
res, err := client.Enqueue(t) info, err := client.Enqueue(task)
// Schedule the task to be processed after one minute. // Schedule the task to be processed after one minute.
res, err = client.Enqueue(t, asynq.ProcessIn(1*time.Minute)) info, err = client.Enqueue(task, asynq.ProcessIn(1*time.Minute))
The Server is used to run the task processing workers with a given The Server is used to run the task processing workers with a given
handler. handler.
@@ -52,10 +56,13 @@ Example of a type that implements the Handler interface.
func (h *TaskHandler) ProcessTask(ctx context.Context, task *asynq.Task) error { func (h *TaskHandler) ProcessTask(ctx context.Context, task *asynq.Task) error {
switch task.Type { switch task.Type {
case "send_email": case "example":
id, err := task.Payload.GetInt("user_id") var data ExamplePayload
// send email if err := json.Unmarshal(task.Payload(), &data); err != nil {
//... return err
}
// perform task with the data
default: default:
return fmt.Errorf("unexpected task type %q", task.Type) return fmt.Errorf("unexpected task type %q", task.Type)
} }
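
The doc.go update above reflects the move from map payloads to raw []byte payloads. A common way to keep that type-safe is to pair a payload struct with a task constructor and a handler, sketched here with made-up type and task names (this pattern is implied by the docs, not added by this commit):

import (
    "context"
    "encoding/json"

    "github.com/hibiken/asynq"
)

// ImageResizePayload is an illustrative payload type.
type ImageResizePayload struct {
    SourceURL string
    Width     int
    Height    int
}

// NewImageResizeTask marshals the payload once, at enqueue time.
func NewImageResizeTask(src string, w, h int) (*asynq.Task, error) {
    b, err := json.Marshal(ImageResizePayload{SourceURL: src, Width: w, Height: h})
    if err != nil {
        return nil, err
    }
    return asynq.NewTask("image:resize", b), nil
}

// HandleImageResizeTask unmarshals the payload back in the worker.
func HandleImageResizeTask(ctx context.Context, t *asynq.Task) error {
    var p ImageResizePayload
    if err := json.Unmarshal(t.Payload(), &p); err != nil {
        return err
    }
    // resize the image at p.SourceURL to p.Width x p.Height ...
    return nil
}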


@@ -5,6 +5,7 @@
package asynq_test package asynq_test
import ( import (
"context"
"fmt" "fmt"
"log" "log"
"os" "os"
@@ -30,7 +31,7 @@ func ExampleServer_Run() {
} }
} }
func ExampleServer_Stop() { func ExampleServer_Shutdown() {
srv := asynq.NewServer( srv := asynq.NewServer(
asynq.RedisClientOpt{Addr: ":6379"}, asynq.RedisClientOpt{Addr: ":6379"},
asynq.Config{Concurrency: 20}, asynq.Config{Concurrency: 20},
@@ -47,10 +48,10 @@ func ExampleServer_Stop() {
signal.Notify(sigs, unix.SIGTERM, unix.SIGINT) signal.Notify(sigs, unix.SIGTERM, unix.SIGINT)
<-sigs // wait for termination signal <-sigs // wait for termination signal
srv.Stop() srv.Shutdown()
} }
func ExampleServer_Quiet() { func ExampleServer_Stop() {
srv := asynq.NewServer( srv := asynq.NewServer(
asynq.RedisClientOpt{Addr: ":6379"}, asynq.RedisClientOpt{Addr: ":6379"},
asynq.Config{Concurrency: 20}, asynq.Config{Concurrency: 20},
@@ -70,13 +71,13 @@ func ExampleServer_Quiet() {
for { for {
s := <-sigs s := <-sigs
if s == unix.SIGTSTP { if s == unix.SIGTSTP {
srv.Quiet() // stop processing new tasks srv.Stop() // stop processing new tasks
continue continue
} }
break break // received SIGTERM or SIGINT signal
} }
srv.Stop() srv.Shutdown()
} }
func ExampleScheduler() { func ExampleScheduler() {
@@ -113,3 +114,20 @@ func ExampleParseRedisURI() {
// localhost:6379 // localhost:6379
// 10 // 10
} }
func ExampleResultWriter() {
// ResultWriter is only accessible in Handler.
h := func(ctx context.Context, task *asynq.Task) error {
// .. do task processing work
res := []byte("task result data")
n, err := task.ResultWriter().Write(res) // implements io.Writer
if err != nil {
return fmt.Errorf("failed to write task result: %v", err)
}
log.Printf(" %d bytes written", n)
return nil
}
_ = h
}


@@ -45,7 +45,7 @@ func newForwarder(params forwarderParams) *forwarder {
} }
} }
func (f *forwarder) terminate() { func (f *forwarder) shutdown() {
f.logger.Debug("Forwarder shutting down...") f.logger.Debug("Forwarder shutting down...")
// Signal the forwarder goroutine to stop polling. // Signal the forwarder goroutine to stop polling.
f.done <- struct{}{} f.done <- struct{}{}
@@ -69,7 +69,7 @@ func (f *forwarder) start(wg *sync.WaitGroup) {
} }
func (f *forwarder) exec() { func (f *forwarder) exec() {
if err := f.broker.CheckAndEnqueue(f.queues...); err != nil { if err := f.broker.ForwardIfReady(f.queues...); err != nil {
f.logger.Errorf("Could not enqueue scheduled tasks: %v", err) f.logger.Errorf("Failed to forward scheduled tasks: %v", err)
} }
} }
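
The forwarder now exposes shutdown instead of terminate and calls the renamed broker method ForwardIfReady. The start/shutdown shape it shares with the other background components boils down to a timer loop and a done channel; a generic sketch under those assumptions (names here are illustrative, not the library's internals):

import (
    "sync"
    "time"
)

type poller struct {
    done     chan struct{} // signaled by shutdown()
    interval time.Duration
    exec     func() // work performed on every tick, e.g. "forward scheduled tasks if ready"
}

func (p *poller) start(wg *sync.WaitGroup) {
    wg.Add(1)
    go func() {
        defer wg.Done()
        timer := time.NewTimer(p.interval)
        defer timer.Stop()
        for {
            select {
            case <-p.done:
                return // shutdown requested
            case <-timer.C:
                p.exec()
                timer.Reset(p.interval)
            }
        }
    }()
}

func (p *poller) shutdown() {
    p.done <- struct{}{} // blocks until the goroutine observes the signal
}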


@@ -111,7 +111,7 @@ func TestForwarder(t *testing.T) {
var wg sync.WaitGroup var wg sync.WaitGroup
s.start(&wg) s.start(&wg)
time.Sleep(tc.wait) time.Sleep(tc.wait)
s.terminate() s.shutdown()
for qname, want := range tc.wantScheduled { for qname, want := range tc.wantScheduled {
gotScheduled := h.GetScheduledMessages(t, r, qname) gotScheduled := h.GetScheduledMessages(t, r, qname)
@@ -130,7 +130,7 @@ func TestForwarder(t *testing.T) {
for qname, want := range tc.wantPending { for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname) gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" { if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.QueueKey(qname), diff) t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.PendingKey(qname), diff)
} }
} }
} }

go.mod

@@ -1,15 +1,19 @@
module github.com/hibiken/asynq module github.com/hibiken/asynq
go 1.13 go 1.14
require ( require (
github.com/go-redis/redis/v7 v7.4.0 github.com/go-redis/redis/v8 v8.11.2
github.com/google/go-cmp v0.4.0 github.com/golang/protobuf v1.4.2
github.com/google/uuid v1.1.1 github.com/google/go-cmp v0.5.6
github.com/google/uuid v1.2.0
github.com/kr/pretty v0.1.0 // indirect
github.com/robfig/cron/v3 v3.0.1 github.com/robfig/cron/v3 v3.0.1
github.com/spf13/cast v1.3.1 github.com/spf13/cast v1.3.1
github.com/stretchr/testify v1.6.1 // indirect
go.uber.org/goleak v0.10.0 go.uber.org/goleak v0.10.0
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e golang.org/x/sys v0.0.0-20210112080510-489259a85091
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
gopkg.in/yaml.v2 v2.2.7 // indirect google.golang.org/protobuf v1.25.0
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
) )

go.sum

@@ -1,61 +1,165 @@
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4= github.com/go-redis/redis/v8 v8.11.2 h1:WqlSpAwz8mxDSMCvbyz1Mkiqe0LE5OY4j3lgkvu1Ts0=
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4= go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -63,8 +167,11 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -45,7 +45,7 @@ func newHealthChecker(params healthcheckerParams) *healthchecker {
} }
} }
func (hc *healthchecker) terminate() { func (hc *healthchecker) shutdown() {
if hc.healthcheckFunc == nil { if hc.healthcheckFunc == nil {
return return
} }


@@ -51,7 +51,7 @@ func TestHealthChecker(t *testing.T) {
} }
mu.Unlock() mu.Unlock()
hc.terminate() hc.shutdown()
} }
func TestHealthCheckerWhenRedisDown(t *testing.T) { func TestHealthCheckerWhenRedisDown(t *testing.T) {
@@ -99,5 +99,5 @@ func TestHealthCheckerWhenRedisDown(t *testing.T) {
} }
mu.Unlock() mu.Unlock()
hc.terminate() hc.shutdown()
} }


@@ -12,6 +12,7 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/timeutil"
) )
// heartbeater is responsible for writing process info to redis periodically to // heartbeater is responsible for writing process info to redis periodically to
@@ -19,6 +20,7 @@ import (
type heartbeater struct { type heartbeater struct {
logger *log.Logger logger *log.Logger
broker base.Broker broker base.Broker
clock timeutil.Clock
// channel to communicate back to the long running "heartbeater" goroutine. // channel to communicate back to the long running "heartbeater" goroutine.
done chan struct{} done chan struct{}
@@ -40,8 +42,8 @@ type heartbeater struct {
started time.Time started time.Time
workers map[string]*workerInfo workers map[string]*workerInfo
// status is shared with other goroutine but is concurrency safe. // state is shared with other goroutine but is concurrency safe.
status *base.ServerStatus state *serverState
// channels to receive updates on active workers. // channels to receive updates on active workers.
starting <-chan *workerInfo starting <-chan *workerInfo
@@ -55,7 +57,7 @@ type heartbeaterParams struct {
concurrency int concurrency int
queues map[string]int queues map[string]int
strictPriority bool strictPriority bool
status *base.ServerStatus state *serverState
starting <-chan *workerInfo starting <-chan *workerInfo
finished <-chan *base.TaskMessage finished <-chan *base.TaskMessage
} }
@@ -69,6 +71,7 @@ func newHeartbeater(params heartbeaterParams) *heartbeater {
return &heartbeater{ return &heartbeater{
logger: params.logger, logger: params.logger,
broker: params.broker, broker: params.broker,
clock: timeutil.NewRealClock(),
done: make(chan struct{}), done: make(chan struct{}),
interval: params.interval, interval: params.interval,
@@ -79,14 +82,14 @@ func newHeartbeater(params heartbeaterParams) *heartbeater {
queues: params.queues, queues: params.queues,
strictPriority: params.strictPriority, strictPriority: params.strictPriority,
status: params.status, state: params.state,
workers: make(map[string]*workerInfo), workers: make(map[string]*workerInfo),
starting: params.starting, starting: params.starting,
finished: params.finished, finished: params.finished,
} }
} }
func (h *heartbeater) terminate() { func (h *heartbeater) shutdown() {
h.logger.Debug("Heartbeater shutting down...") h.logger.Debug("Heartbeater shutting down...")
// Signal the heartbeater goroutine to stop. // Signal the heartbeater goroutine to stop.
h.done <- struct{}{} h.done <- struct{}{}
@@ -100,6 +103,8 @@ type workerInfo struct {
started time.Time started time.Time
// deadline the worker has to finish processing the task by. // deadline the worker has to finish processing the task by.
deadline time.Time deadline time.Time
// lease the worker holds for the task.
lease *base.Lease
} }
func (h *heartbeater) start(wg *sync.WaitGroup) { func (h *heartbeater) start(wg *sync.WaitGroup) {
@@ -107,7 +112,7 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
go func() { go func() {
defer wg.Done() defer wg.Done()
h.started = time.Now() h.started = h.clock.Now()
h.beat() h.beat()
@@ -125,16 +130,21 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
timer.Reset(h.interval) timer.Reset(h.interval)
case w := <-h.starting: case w := <-h.starting:
h.workers[w.msg.ID.String()] = w h.workers[w.msg.ID] = w
case msg := <-h.finished: case msg := <-h.finished:
delete(h.workers, msg.ID.String()) delete(h.workers, msg.ID)
} }
} }
}() }()
} }
// beat extends lease for workers and writes server/worker info to redis.
func (h *heartbeater) beat() { func (h *heartbeater) beat() {
h.state.mu.Lock()
srvStatus := h.state.value.String()
h.state.mu.Unlock()
info := base.ServerInfo{ info := base.ServerInfo{
Host: h.host, Host: h.host,
PID: h.pid, PID: h.pid,
@@ -142,12 +152,13 @@ func (h *heartbeater) beat() {
Concurrency: h.concurrency, Concurrency: h.concurrency,
Queues: h.queues, Queues: h.queues,
StrictPriority: h.strictPriority, StrictPriority: h.strictPriority,
Status: h.status.String(), Status: srvStatus,
Started: h.started, Started: h.started,
ActiveWorkerCount: len(h.workers), ActiveWorkerCount: len(h.workers),
} }
var ws []*base.WorkerInfo var ws []*base.WorkerInfo
idsByQueue := make(map[string][]string)
for id, w := range h.workers { for id, w := range h.workers {
ws = append(ws, &base.WorkerInfo{ ws = append(ws, &base.WorkerInfo{
Host: h.host, Host: h.host,
@@ -160,11 +171,30 @@ func (h *heartbeater) beat() {
Started: w.started, Started: w.started,
Deadline: w.deadline, Deadline: w.deadline,
}) })
// Check lease before adding to the set to make sure not to extend the lease if the lease is already expired.
if w.lease.IsValid() {
idsByQueue[w.msg.Queue] = append(idsByQueue[w.msg.Queue], id)
} else {
w.lease.NotifyExpiration() // notify processor if the lease is expired
}
} }
// Note: Set TTL to be long enough so that it won't expire before we write again // Note: Set TTL to be long enough so that it won't expire before we write again
// and short enough to expire quickly once the process is shut down or killed. // and short enough to expire quickly once the process is shut down or killed.
if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil { if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil {
h.logger.Errorf("could not write server state data: %v", err) h.logger.Errorf("Failed to write server state data: %v", err)
}
for qname, ids := range idsByQueue {
expirationTime, err := h.broker.ExtendLease(qname, ids...)
if err != nil {
h.logger.Errorf("Failed to extend lease for tasks %v: %v", ids, err)
continue
}
for _, id := range ids {
if l := h.workers[id].lease; !l.Reset(expirationTime) {
h.logger.Warnf("Lease reset failed for %s; lease deadline: %v", id, l.Deadline())
}
}
} }
} }
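
beat() now extends leases per queue and notifies the processor when a lease has already expired, relying on base.Lease's IsValid, Reset, Deadline, and NotifyExpiration. A hypothetical minimal lease with that surface, shown only to make the calls above concrete (the real internal/base.Lease may differ):

import (
    "sync"
    "time"
)

type lease struct {
    mu       sync.Mutex
    deadline time.Time
    once     sync.Once
    expired  chan struct{} // closed once when NotifyExpiration is called
}

func newLease(d time.Time) *lease {
    return &lease{deadline: d, expired: make(chan struct{})}
}

func (l *lease) Deadline() time.Time {
    l.mu.Lock()
    defer l.mu.Unlock()
    return l.deadline
}

// IsValid reports whether the lease deadline is still in the future.
func (l *lease) IsValid() bool {
    l.mu.Lock()
    defer l.mu.Unlock()
    return time.Now().Before(l.deadline)
}

// Reset extends the deadline; it refuses to revive an already expired lease.
func (l *lease) Reset(t time.Time) bool {
    l.mu.Lock()
    defer l.mu.Unlock()
    if !time.Now().Before(l.deadline) {
        return false
    }
    l.deadline = t
    return true
}

// NotifyExpiration tells the worker holding the lease to give up.
func (l *lease) NotifyExpiration() { l.once.Do(func() { close(l.expired) }) }

// Done lets the worker select on lease expiration.
func (l *lease) Done() <-chan struct{} { return l.expired }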


@@ -5,6 +5,7 @@
package asynq package asynq
import ( import (
"context"
"sync" "sync"
"testing" "testing"
"time" "time"
@@ -15,21 +16,143 @@ import (
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
"github.com/hibiken/asynq/internal/testbroker" "github.com/hibiken/asynq/internal/testbroker"
"github.com/hibiken/asynq/internal/timeutil"
) )
// Test goes through a few phases.
//
// Phase1: Simulate Server startup; Simulate starting tasks listed in startedWorkers
// Phase2: Simulate finishing tasks listed in finishedTasks
// Phase3: Simulate Server shutdown;
func TestHeartbeater(t *testing.T) { func TestHeartbeater(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close() defer r.Close()
rdbClient := rdb.NewRDB(r) rdbClient := rdb.NewRDB(r)
now := time.Now()
const elapsedTime = 10 * time.Second // simulated time elapsed between phase1 and phase2
clock := timeutil.NewSimulatedClock(time.Time{}) // time will be set in each test
t1 := h.NewTaskMessageWithQueue("task1", nil, "default")
t2 := h.NewTaskMessageWithQueue("task2", nil, "default")
t3 := h.NewTaskMessageWithQueue("task3", nil, "default")
t4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
t5 := h.NewTaskMessageWithQueue("task5", nil, "custom")
t6 := h.NewTaskMessageWithQueue("task6", nil, "default")
// Note: intentionally set to a time earlier than now.Add(rdb.LeaseDuration) to test that lease extension is working.
lease1 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease2 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease3 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease4 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease5 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease6 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
tests := []struct { tests := []struct {
interval time.Duration desc string
// Interval between heartbeats.
interval time.Duration
// Server info.
host string host string
pid int pid int
queues map[string]int queues map[string]int
concurrency int concurrency int
active map[string][]*base.TaskMessage // initial active set state
lease map[string][]base.Z // initial lease set state
wantLease1 map[string][]base.Z // expected lease set state after starting all startedWorkers
wantLease2 map[string][]base.Z // expected lease set state after finishing all finishedTasks
startedWorkers []*workerInfo // workerInfo to send via the started channel
finishedTasks []*base.TaskMessage // tasks to send via the finished channel
startTime time.Time // simulated start time
elapsedTime time.Duration // simulated time elapsed between starting and finishing processing tasks
}{ }{
{2 * time.Second, "localhost", 45678, map[string]int{"default": 1}, 10}, {
desc: "With single queue",
interval: 2 * time.Second,
host: "localhost",
pid: 45678,
queues: map[string]int{"default": 1},
concurrency: 10,
active: map[string][]*base.TaskMessage{
"default": {t1, t2, t3},
},
lease: map[string][]base.Z{
"default": {
{Message: t1, Score: now.Add(10 * time.Second).Unix()},
{Message: t2, Score: now.Add(10 * time.Second).Unix()},
{Message: t3, Score: now.Add(10 * time.Second).Unix()},
},
},
startedWorkers: []*workerInfo{
{msg: t1, started: now, deadline: now.Add(2 * time.Minute), lease: lease1},
{msg: t2, started: now, deadline: now.Add(2 * time.Minute), lease: lease2},
{msg: t3, started: now, deadline: now.Add(2 * time.Minute), lease: lease3},
},
finishedTasks: []*base.TaskMessage{t1, t2},
wantLease1: map[string][]base.Z{
"default": {
{Message: t1, Score: now.Add(rdb.LeaseDuration).Unix()},
{Message: t2, Score: now.Add(rdb.LeaseDuration).Unix()},
{Message: t3, Score: now.Add(rdb.LeaseDuration).Unix()},
},
},
wantLease2: map[string][]base.Z{
"default": {
{Message: t3, Score: now.Add(elapsedTime).Add(rdb.LeaseDuration).Unix()},
},
},
startTime: now,
elapsedTime: elapsedTime,
},
{
desc: "With multiple queues",
interval: 2 * time.Second,
host: "localhost",
pid: 45678,
queues: map[string]int{"default": 1, "custom": 2},
concurrency: 10,
active: map[string][]*base.TaskMessage{
"default": {t6},
"custom": {t4, t5},
},
lease: map[string][]base.Z{
"default": {
{Message: t6, Score: now.Add(10 * time.Second).Unix()},
},
"custom": {
{Message: t4, Score: now.Add(10 * time.Second).Unix()},
{Message: t5, Score: now.Add(10 * time.Second).Unix()},
},
},
startedWorkers: []*workerInfo{
{msg: t6, started: now, deadline: now.Add(2 * time.Minute), lease: lease6},
{msg: t4, started: now, deadline: now.Add(2 * time.Minute), lease: lease4},
{msg: t5, started: now, deadline: now.Add(2 * time.Minute), lease: lease5},
},
finishedTasks: []*base.TaskMessage{t6, t5},
wantLease1: map[string][]base.Z{
"default": {
{Message: t6, Score: now.Add(rdb.LeaseDuration).Unix()},
},
"custom": {
{Message: t4, Score: now.Add(rdb.LeaseDuration).Unix()},
{Message: t5, Score: now.Add(rdb.LeaseDuration).Unix()},
},
},
wantLease2: map[string][]base.Z{
"default": {},
"custom": {
{Message: t4, Score: now.Add(elapsedTime).Add(rdb.LeaseDuration).Unix()},
},
},
startTime: now,
elapsedTime: elapsedTime,
},
} }
timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond) timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond)
@@ -37,8 +160,15 @@ func TestHeartbeater(t *testing.T) {
ignoreFieldOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID") ignoreFieldOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID")
for _, tc := range tests { for _, tc := range tests {
h.FlushDB(t, r) h.FlushDB(t, r)
h.SeedAllActiveQueues(t, r, tc.active)
h.SeedAllLease(t, r, tc.lease)
status := base.NewServerStatus(base.StatusIdle) clock.SetTime(tc.startTime)
rdbClient.SetClock(clock)
srvState := &serverState{}
startingCh := make(chan *workerInfo)
finishedCh := make(chan *base.TaskMessage)
hb := newHeartbeater(heartbeaterParams{ hb := newHeartbeater(heartbeaterParams{
logger: testLogger, logger: testLogger,
broker: rdbClient, broker: rdbClient,
@@ -46,77 +176,139 @@ func TestHeartbeater(t *testing.T) {
concurrency: tc.concurrency, concurrency: tc.concurrency,
queues: tc.queues, queues: tc.queues,
strictPriority: false, strictPriority: false,
status: status, state: srvState,
starting: make(chan *workerInfo), starting: startingCh,
finished: make(chan *base.TaskMessage), finished: finishedCh,
}) })
hb.clock = clock
// Change host and pid fields for testing purpose. // Change host and pid fields for testing purpose.
hb.host = tc.host hb.host = tc.host
hb.pid = tc.pid hb.pid = tc.pid
status.Set(base.StatusRunning) //===================
// Start Phase1
//===================
srvState.mu.Lock()
srvState.value = srvStateActive // simulating Server.Start
srvState.mu.Unlock()
var wg sync.WaitGroup var wg sync.WaitGroup
hb.start(&wg) hb.start(&wg)
want := &base.ServerInfo{ // Simulate processor starting to work on tasks.
Host: tc.host, for _, w := range tc.startedWorkers {
PID: tc.pid, startingCh <- w
Queues: tc.queues,
Concurrency: tc.concurrency,
Started: time.Now(),
Status: "running",
} }
// allow for heartbeater to write to redis // Wait for heartbeater to write to redis
time.Sleep(tc.interval) time.Sleep(tc.interval * 2)
ss, err := rdbClient.ListServers() ss, err := rdbClient.ListServers()
if err != nil { if err != nil {
t.Errorf("could not read server info from redis: %v", err) t.Errorf("%s: could not read server info from redis: %v", tc.desc, err)
hb.terminate() hb.shutdown()
continue continue
} }
if len(ss) != 1 { if len(ss) != 1 {
t.Errorf("(*RDB).ListServers returned %d process info, want 1", len(ss)) t.Errorf("%s: (*RDB).ListServers returned %d server info, want 1", tc.desc, len(ss))
hb.terminate() hb.shutdown()
continue continue
} }
if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" { wantInfo := &base.ServerInfo{
t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff) Host: tc.host,
hb.terminate() PID: tc.pid,
Queues: tc.queues,
Concurrency: tc.concurrency,
Started: now,
Status: "active",
ActiveWorkerCount: len(tc.startedWorkers),
}
if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
t.Errorf("%s: redis stored server status %+v, want %+v; (-want, +got)\n%s", tc.desc, ss[0], wantInfo, diff)
hb.shutdown()
continue continue
} }
// status change for qname, wantLease := range tc.wantLease1 {
status.Set(base.StatusStopped) gotLease := h.GetLeaseEntries(t, r, qname)
if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s: mismatch found in %q: (-want,+got):\n%s", tc.desc, base.LeaseKey(qname), diff)
}
}
// allow for heartbeater to write to redis for _, w := range tc.startedWorkers {
if want := now.Add(rdb.LeaseDuration); w.lease.Deadline() != want {
t.Errorf("%s: lease deadline for %v is set to %v, want %v", tc.desc, w.msg, w.lease.Deadline(), want)
}
}
//===================
// Start Phase2
//===================
clock.AdvanceTime(tc.elapsedTime)
// Simulate processor finished processing tasks.
for _, msg := range tc.finishedTasks {
if err := rdbClient.Done(context.Background(), msg); err != nil {
t.Fatalf("RDB.Done failed: %v", err)
}
finishedCh <- msg
}
// Wait for heartbeater to write to redis
time.Sleep(tc.interval * 2) time.Sleep(tc.interval * 2)
want.Status = "stopped" for qname, wantLease := range tc.wantLease2 {
gotLease := h.GetLeaseEntries(t, r, qname)
if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s: mismatch found in %q: (-want,+got):\n%s", tc.desc, base.LeaseKey(qname), diff)
}
}
//===================
// Start Phase3
//===================
// Server state change; simulating Server.Shutdown
srvState.mu.Lock()
srvState.value = srvStateClosed
srvState.mu.Unlock()
// Wait for heartbeater to write to redis
time.Sleep(tc.interval * 2)
wantInfo = &base.ServerInfo{
Host: tc.host,
PID: tc.pid,
Queues: tc.queues,
Concurrency: tc.concurrency,
Started: now,
Status: "closed",
ActiveWorkerCount: len(tc.startedWorkers) - len(tc.finishedTasks),
}
ss, err = rdbClient.ListServers() ss, err = rdbClient.ListServers()
if err != nil { if err != nil {
t.Errorf("could not read process status from redis: %v", err) t.Errorf("%s: could not read server status from redis: %v", tc.desc, err)
hb.terminate() hb.shutdown()
continue continue
} }
if len(ss) != 1 { if len(ss) != 1 {
t.Errorf("(*RDB).ListProcesses returned %d process info, want 1", len(ss)) t.Errorf("%s: (*RDB).ListServers returned %d server info, want 1", tc.desc, len(ss))
hb.terminate() hb.shutdown()
continue continue
} }
if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" { if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff) t.Errorf("%s: redis stored process status %+v, want %+v; (-want, +got)\n%s", tc.desc, ss[0], wantInfo, diff)
hb.terminate() hb.shutdown()
continue continue
} }
hb.terminate() hb.shutdown()
} }
} }
@@ -131,6 +323,7 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
r := rdb.NewRDB(setup(t)) r := rdb.NewRDB(setup(t))
defer r.Close() defer r.Close()
testBroker := testbroker.NewTestBroker(r) testBroker := testbroker.NewTestBroker(r)
state := &serverState{value: srvStateActive}
hb := newHeartbeater(heartbeaterParams{ hb := newHeartbeater(heartbeaterParams{
logger: testLogger, logger: testLogger,
broker: testBroker, broker: testBroker,
@@ -138,7 +331,7 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
concurrency: 10, concurrency: 10,
queues: map[string]int{"default": 1}, queues: map[string]int{"default": 1},
strictPriority: false, strictPriority: false,
status: base.NewServerStatus(base.StatusRunning), state: state,
starting: make(chan *workerInfo), starting: make(chan *workerInfo),
finished: make(chan *base.TaskMessage), finished: make(chan *base.TaskMessage),
}) })
@@ -150,5 +343,5 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
// wait for heartbeater to try writing data to redis // wait for heartbeater to try writing data to redis
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
hb.terminate() hb.shutdown()
} }


@@ -2,7 +2,7 @@
// Use of this source code is governed by a MIT license // Use of this source code is governed by a MIT license
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
package inspeq package asynq
import ( import (
"fmt" "fmt"
@@ -10,10 +10,9 @@ import (
"strings" "strings"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
) )
@@ -24,7 +23,7 @@ type Inspector struct {
} }
// New returns a new instance of Inspector. // New returns a new instance of Inspector.
func New(r asynq.RedisConnOpt) *Inspector { func NewInspector(r RedisConnOpt) *Inspector {
c, ok := r.MakeRedisClient().(redis.UniversalClient) c, ok := r.MakeRedisClient().(redis.UniversalClient)
if !ok { if !ok {
panic(fmt.Sprintf("inspeq: unsupported RedisConnOpt type %T", r)) panic(fmt.Sprintf("inspeq: unsupported RedisConnOpt type %T", r))
@@ -44,15 +43,22 @@ func (i *Inspector) Queues() ([]string, error) {
return i.rdb.AllQueues() return i.rdb.AllQueues()
} }
// QueueStats represents a state of queues at a certain time. // QueueInfo represents a state of queues at a certain time.
type QueueStats struct { type QueueInfo struct {
// Name of the queue. // Name of the queue.
Queue string Queue string
// Total number of bytes that the queue and its tasks require to be stored in redis. // Total number of bytes that the queue and its tasks require to be stored in redis.
// It is an approximate memory usage value in bytes since the value is computed by sampling.
MemoryUsage int64 MemoryUsage int64
// Latency of the queue, measured by the oldest pending task in the queue.
Latency time.Duration
// Size is the total number of tasks in the queue. // Size is the total number of tasks in the queue.
// The value is the sum of Pending, Active, Scheduled, Retry, and Archived. // The value is the sum of Pending, Active, Scheduled, Retry, and Archived.
Size int Size int
// Number of pending tasks. // Number of pending tasks.
Pending int Pending int
// Number of active tasks. // Number of active tasks.
@@ -63,20 +69,30 @@ type QueueStats struct {
Retry int Retry int
// Number of archived tasks. // Number of archived tasks.
Archived int Archived int
// Total number of tasks being processed during the given date. // Number of stored completed tasks.
Completed int
// Total number of tasks being processed within the given date (counter resets daily).
// The number includes both succeeded and failed tasks. // The number includes both succeeded and failed tasks.
Processed int Processed int
// Total number of tasks failed to be processed during the given date. // Total number of tasks failed to be processed within the given date (counter resets daily).
Failed int Failed int
// Total number of tasks processed (cumulative).
ProcessedTotal int
// Total number of tasks failed (cumulative).
FailedTotal int
// Paused indicates whether the queue is paused. // Paused indicates whether the queue is paused.
// If true, tasks in the queue will not be processed. // If true, tasks in the queue will not be processed.
Paused bool Paused bool
// Time when this stats was taken.
// Time when this queue info snapshot was taken.
Timestamp time.Time Timestamp time.Time
} }
// CurrentStats returns a current stats of the given queue. // GetQueueInfo returns current information of the given queue.
func (i *Inspector) CurrentStats(qname string) (*QueueStats, error) { func (i *Inspector) GetQueueInfo(qname string) (*QueueInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return nil, err return nil, err
} }
@@ -84,19 +100,23 @@ func (i *Inspector) CurrentStats(qname string) (*QueueStats, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &QueueStats{ return &QueueInfo{
Queue: stats.Queue, Queue: stats.Queue,
MemoryUsage: stats.MemoryUsage, MemoryUsage: stats.MemoryUsage,
Size: stats.Size, Latency: stats.Latency,
Pending: stats.Pending, Size: stats.Size,
Active: stats.Active, Pending: stats.Pending,
Scheduled: stats.Scheduled, Active: stats.Active,
Retry: stats.Retry, Scheduled: stats.Scheduled,
Archived: stats.Archived, Retry: stats.Retry,
Processed: stats.Processed, Archived: stats.Archived,
Failed: stats.Failed, Completed: stats.Completed,
Paused: stats.Paused, Processed: stats.Processed,
Timestamp: stats.Timestamp, Failed: stats.Failed,
ProcessedTotal: stats.ProcessedTotal,
FailedTotal: stats.FailedTotal,
Paused: stats.Paused,
Timestamp: stats.Timestamp,
}, nil }, nil
} }
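A usage sketch of the renamed API (illustrative only; the Redis address and queue name are placeholders, and the field names follow the QueueInfo struct above):

package main

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	// Inspector is now part of the main asynq package (formerly inspeq.New).
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer inspector.Close()

	info, err := inspector.GetQueueInfo("default")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("size=%d pending=%d active=%d completed=%d latency=%v paused=%t\n",
		info.Size, info.Pending, info.Active, info.Completed, info.Latency, info.Paused)
	fmt.Printf("today: processed=%d failed=%d  cumulative: %d/%d\n",
		info.Processed, info.Failed, info.ProcessedTotal, info.FailedTotal)
}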
@@ -134,23 +154,16 @@ func (i *Inspector) History(qname string, n int) ([]*DailyStats, error) {
return res, nil return res, nil
} }
// ErrQueueNotFound indicates that the specified queue does not exist. var (
type ErrQueueNotFound struct { // ErrQueueNotFound indicates that the specified queue does not exist.
qname string ErrQueueNotFound = errors.New("queue not found")
}
func (e *ErrQueueNotFound) Error() string { // ErrQueueNotEmpty indicates that the specified queue is not empty.
return fmt.Sprintf("queue %q does not exist", e.qname) ErrQueueNotEmpty = errors.New("queue is not empty")
}
// ErrQueueNotEmpty indicates that the specified queue is not empty. // ErrTaskNotFound indicates that the specified task cannot be found in the queue.
type ErrQueueNotEmpty struct { ErrTaskNotFound = errors.New("task not found")
qname string )
}
func (e *ErrQueueNotEmpty) Error() string {
return fmt.Sprintf("queue %q is not empty", e.qname)
}
// DeleteQueue removes the specified queue. // DeleteQueue removes the specified queue.
// //
@@ -164,134 +177,30 @@ func (e *ErrQueueNotEmpty) Error() string {
// returns ErrQueueNotEmpty. // returns ErrQueueNotEmpty.
func (i *Inspector) DeleteQueue(qname string, force bool) error { func (i *Inspector) DeleteQueue(qname string, force bool) error {
err := i.rdb.RemoveQueue(qname, force) err := i.rdb.RemoveQueue(qname, force)
if _, ok := err.(*rdb.ErrQueueNotFound); ok { if errors.IsQueueNotFound(err) {
return &ErrQueueNotFound{qname} return fmt.Errorf("%w: queue=%q", ErrQueueNotFound, qname)
} }
if _, ok := err.(*rdb.ErrQueueNotEmpty); ok { if errors.IsQueueNotEmpty(err) {
return &ErrQueueNotEmpty{qname} return fmt.Errorf("%w: queue=%q", ErrQueueNotEmpty, qname)
} }
return err return err
} }
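With the concrete error types replaced by package-level sentinels, callers can switch from type assertions to errors.Is from the standard library; a sketch reusing the inspector from the previous example (queue name is a placeholder):

// Delete a queue only if it is empty; force-delete when it still has tasks.
if err := inspector.DeleteQueue("low-priority", false); err != nil {
	switch {
	case errors.Is(err, asynq.ErrQueueNotFound):
		log.Printf("nothing to delete: %v", err)
	case errors.Is(err, asynq.ErrQueueNotEmpty):
		// force=true removes the queue together with all of its tasks.
		if err := inspector.DeleteQueue("low-priority", true); err != nil {
			log.Fatal(err)
		}
	default:
		log.Fatal(err)
	}
}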
// PendingTask is a task in a queue and is ready to be processed. // GetTaskInfo retrieves task information given a task id and queue name.
type PendingTask struct { //
*asynq.Task // Returns an error wrapping ErrQueueNotFound if a queue with the given name doesn't exist.
ID string // Returns an error wrapping ErrTaskNotFound if a task with the given id doesn't exist in the queue.
Queue string func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) {
MaxRetry int info, err := i.rdb.GetTaskInfo(qname, id)
Retried int switch {
LastError string case errors.IsQueueNotFound(err):
} return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case errors.IsTaskNotFound(err):
// ActiveTask is a task that's currently being processed. return nil, fmt.Errorf("asynq: %w", ErrTaskNotFound)
type ActiveTask struct { case err != nil:
*asynq.Task return nil, fmt.Errorf("asynq: %v", err)
ID string
Queue string
MaxRetry int
Retried int
LastError string
}
// ScheduledTask is a task scheduled to be processed in the future.
type ScheduledTask struct {
*asynq.Task
ID string
Queue string
MaxRetry int
Retried int
LastError string
NextProcessAt time.Time
score int64
}
// RetryTask is a task scheduled to be retried in the future.
type RetryTask struct {
*asynq.Task
ID string
Queue string
NextProcessAt time.Time
MaxRetry int
Retried int
LastError string
// TODO: LastFailedAt time.Time
score int64
}
// ArchivedTask is a task archived for debugging and inspection purposes, and
// it won't be retried automatically.
// A task can be archived when the task exhausts its retry counts or manually
// archived by a user via the CLI or Inspector.
type ArchivedTask struct {
*asynq.Task
ID string
Queue string
MaxRetry int
Retried int
LastFailedAt time.Time
LastError string
score int64
}
// Format string used for task key.
// Format is <prefix>:<uuid>:<score>.
const taskKeyFormat = "%s:%v:%v"
// Prefix used for task key.
const (
keyPrefixPending = "p"
keyPrefixScheduled = "s"
keyPrefixRetry = "r"
keyPrefixArchived = "a"
allKeyPrefixes = keyPrefixPending + keyPrefixScheduled + keyPrefixRetry + keyPrefixArchived
)
// Key returns a key used to delete, and archive the pending task.
func (t *PendingTask) Key() string {
// Note: Pending tasks are stored in redis LIST, therefore no score.
// Use zero for the score to use the same key format.
return fmt.Sprintf(taskKeyFormat, keyPrefixPending, t.ID, 0)
}
// Key returns a key used to delete, run, and archive the scheduled task.
func (t *ScheduledTask) Key() string {
return fmt.Sprintf(taskKeyFormat, keyPrefixScheduled, t.ID, t.score)
}
// Key returns a key used to delete, run, and archive the retry task.
func (t *RetryTask) Key() string {
return fmt.Sprintf(taskKeyFormat, keyPrefixRetry, t.ID, t.score)
}
// Key returns a key used to delete and run the archived task.
func (t *ArchivedTask) Key() string {
return fmt.Sprintf(taskKeyFormat, keyPrefixArchived, t.ID, t.score)
}
// parseTaskKey parses a key string and returns each part of key with proper
// type if valid, otherwise it reports an error.
func parseTaskKey(key string) (prefix string, id uuid.UUID, score int64, err error) {
parts := strings.Split(key, ":")
if len(parts) != 3 {
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
} }
id, err = uuid.Parse(parts[1]) return newTaskInfo(info.Message, info.State, info.NextProcessAt, info.Result), nil
if err != nil {
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
}
score, err = strconv.ParseInt(parts[2], 10, 64)
if err != nil {
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
}
prefix = parts[0]
if len(prefix) != 1 || !strings.Contains(allKeyPrefixes, prefix) {
return "", uuid.Nil, 0, fmt.Errorf("invalid id")
}
return prefix, id, score, nil
} }
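GetTaskInfo replaces the old per-state task types for single-task lookups; a sketch continuing the example above, assuming TaskInfo exposes ID, State, and NextProcessAt (the id would normally come from Client.Enqueue or one of the List calls):

taskID := "placeholder-task-id" // hypothetical id, for illustration only
info, err := inspector.GetTaskInfo("default", taskID)
switch {
case errors.Is(err, asynq.ErrQueueNotFound), errors.Is(err, asynq.ErrTaskNotFound):
	log.Printf("lookup failed: %v", err)
case err != nil:
	log.Fatal(err)
default:
	fmt.Printf("id=%s state=%v next=%v\n", info.ID, info.State, info.NextProcessAt)
}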
// ListOption specifies behavior of list operation. // ListOption specifies behavior of list operation.
@@ -358,26 +267,27 @@ func Page(n int) ListOption {
// ListPendingTasks retrieves pending tasks from the specified queue. // ListPendingTasks retrieves pending tasks from the specified queue.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*PendingTask, error) { func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return nil, err return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
msgs, err := i.rdb.ListPending(qname, pgn) infos, err := i.rdb.ListPending(qname, pgn)
if err != nil { switch {
return nil, err case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
} }
var tasks []*PendingTask var tasks []*TaskInfo
for _, m := range msgs { for _, i := range infos {
tasks = append(tasks, &PendingTask{ tasks = append(tasks, newTaskInfo(
Task: asynq.NewTask(m.Type, m.Payload), i.Message,
ID: m.ID.String(), i.State,
Queue: m.Queue, i.NextProcessAt,
MaxRetry: m.Retry, i.Result,
Retried: m.Retried, ))
LastError: m.ErrorMsg,
})
} }
return tasks, err return tasks, err
} }
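Every List* method now returns []*TaskInfo, so pagination is the only per-call knob; a sketch using the Page option from the hunk header above (PageSize is assumed to be its companion option, and the TaskInfo fields are assumed to mirror the removed PendingTask fields):

// Fetch the second page of 50 pending tasks (pages are 1-indexed).
pending, err := inspector.ListPendingTasks("default", asynq.PageSize(50), asynq.Page(2))
if err != nil {
	log.Fatal(err)
}
for _, t := range pending {
	fmt.Printf("%s  type=%s  retried=%d/%d\n", t.ID, t.Type, t.Retried, t.MaxRetry)
}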
@@ -385,124 +295,155 @@ func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*Pendi
// ListActiveTasks retrieves active tasks from the specified queue. // ListActiveTasks retrieves active tasks from the specified queue.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*ActiveTask, error) { func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return nil, err return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
msgs, err := i.rdb.ListActive(qname, pgn) infos, err := i.rdb.ListActive(qname, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
}
expired, err := i.rdb.ListLeaseExpired(time.Now(), qname)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("asynq: %v", err)
} }
var tasks []*ActiveTask expiredSet := make(map[string]struct{}) // set of expired message IDs
for _, m := range msgs { for _, msg := range expired {
expiredSet[msg.ID] = struct{}{}
tasks = append(tasks, &ActiveTask{
Task: asynq.NewTask(m.Type, m.Payload),
ID: m.ID.String(),
Queue: m.Queue,
MaxRetry: m.Retry,
Retried: m.Retried,
LastError: m.ErrorMsg,
})
} }
return tasks, err var tasks []*TaskInfo
for _, i := range infos {
t := newTaskInfo(
i.Message,
i.State,
i.NextProcessAt,
i.Result,
)
if _, ok := expiredSet[i.Message.ID]; ok {
t.IsOrphaned = true
}
tasks = append(tasks, t)
}
return tasks, nil
} }
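The IsOrphaned flag is derived here by cross-checking the active list against the new lease set; a sketch of how a caller might surface tasks whose lease has expired:

active, err := inspector.ListActiveTasks("default")
if err != nil {
	log.Fatal(err)
}
for _, t := range active {
	if t.IsOrphaned {
		// Lease expired but the task is still listed as active; the recoverer
		// is expected to retry or archive it eventually.
		fmt.Printf("orphaned: id=%s type=%s\n", t.ID, t.Type)
	}
}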
// ListScheduledTasks retrieves scheduled tasks from the specified queue. // ListScheduledTasks retrieves scheduled tasks from the specified queue.
// Tasks are sorted by NextProcessAt field in ascending order. // Tasks are sorted by NextProcessAt in ascending order.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*ScheduledTask, error) { func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return nil, err return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListScheduled(qname, pgn) infos, err := i.rdb.ListScheduled(qname, pgn)
if err != nil { switch {
return nil, err case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
} }
var tasks []*ScheduledTask var tasks []*TaskInfo
for _, z := range zs { for _, i := range infos {
processAt := time.Unix(z.Score, 0) tasks = append(tasks, newTaskInfo(
t := asynq.NewTask(z.Message.Type, z.Message.Payload) i.Message,
tasks = append(tasks, &ScheduledTask{ i.State,
Task: t, i.NextProcessAt,
ID: z.Message.ID.String(), i.Result,
Queue: z.Message.Queue, ))
MaxRetry: z.Message.Retry,
Retried: z.Message.Retried,
LastError: z.Message.ErrorMsg,
NextProcessAt: processAt,
score: z.Score,
})
} }
return tasks, nil return tasks, nil
} }
// ListRetryTasks retrieves retry tasks from the specified queue. // ListRetryTasks retrieves retry tasks from the specified queue.
// Tasks are sorted by NextProcessAt field in ascending order. // Tasks are sorted by NextProcessAt in ascending order.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*RetryTask, error) { func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return nil, err return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListRetry(qname, pgn) infos, err := i.rdb.ListRetry(qname, pgn)
if err != nil { switch {
return nil, err case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
} }
var tasks []*RetryTask var tasks []*TaskInfo
for _, z := range zs { for _, i := range infos {
processAt := time.Unix(z.Score, 0) tasks = append(tasks, newTaskInfo(
t := asynq.NewTask(z.Message.Type, z.Message.Payload) i.Message,
tasks = append(tasks, &RetryTask{ i.State,
Task: t, i.NextProcessAt,
ID: z.Message.ID.String(), i.Result,
Queue: z.Message.Queue, ))
NextProcessAt: processAt,
MaxRetry: z.Message.Retry,
Retried: z.Message.Retried,
// TODO: LastFailedAt: z.Message.LastFailedAt
LastError: z.Message.ErrorMsg,
score: z.Score,
})
} }
return tasks, nil return tasks, nil
} }
// ListArchivedTasks retrieves archived tasks from the specified queue. // ListArchivedTasks retrieves archived tasks from the specified queue.
// Tasks are sorted by LastFailedAt field in descending order. // Tasks are sorted by LastFailedAt in descending order.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*ArchivedTask, error) { func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return nil, err return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListArchived(qname, pgn) infos, err := i.rdb.ListArchived(qname, pgn)
if err != nil { switch {
return nil, err case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
} }
var tasks []*ArchivedTask var tasks []*TaskInfo
for _, z := range zs { for _, i := range infos {
failedAt := time.Unix(z.Score, 0) tasks = append(tasks, newTaskInfo(
t := asynq.NewTask(z.Message.Type, z.Message.Payload) i.Message,
tasks = append(tasks, &ArchivedTask{ i.State,
Task: t, i.NextProcessAt,
ID: z.Message.ID.String(), i.Result,
Queue: z.Message.Queue, ))
MaxRetry: z.Message.Retry, }
Retried: z.Message.Retried, return tasks, nil
LastFailedAt: failedAt, }
LastError: z.Message.ErrorMsg,
score: z.Score, // ListCompletedTasks retrieves completed tasks from the specified queue.
}) // Tasks are sorted by expiration time (i.e. CompletedAt + Retention) in descending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListCompletedTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil {
return nil, fmt.Errorf("asynq: %v", err)
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
infos, err := i.rdb.ListCompleted(qname, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
}
var tasks []*TaskInfo
for _, i := range infos {
tasks = append(tasks, newTaskInfo(
i.Message,
i.State,
i.NextProcessAt,
i.Result,
))
} }
return tasks, nil return tasks, nil
} }
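Completed tasks are only kept around when the task was enqueued with a retention period, so listing them usually pairs with the Retention option handled further down in parseOption; an illustrative sketch (task type, payload, and duration are placeholders; needs the time package in addition to the imports above):

client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
defer client.Close()

// Keep the task (and any Result written by its handler) for 24h after completion.
if _, err := client.Enqueue(asynq.NewTask("email:welcome", nil), asynq.Retention(24*time.Hour)); err != nil {
	log.Fatal(err)
}

// Until the retention window expires, the task shows up here.
done, err := inspector.ListCompletedTasks("default")
if err != nil {
	log.Fatal(err)
}
for _, t := range done {
	fmt.Printf("completed: id=%s result=%q\n", t.ID, t.Result)
}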
@@ -547,27 +488,38 @@ func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
return int(n), err return int(n), err
} }
// DeleteTaskByKey deletes a task with the given key from the given queue. // DeleteAllCompletedTasks deletes all completed tasks from the specified queue,
func (i *Inspector) DeleteTaskByKey(qname, key string) error { // and reports the number of tasks deleted.
func (i *Inspector) DeleteAllCompletedTasks(qname string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return err return 0, err
} }
prefix, id, score, err := parseTaskKey(key) n, err := i.rdb.DeleteAllCompletedTasks(qname)
if err != nil { return int(n), err
return err }
// DeleteTask deletes a task with the given id from the given queue.
// The task needs to be in pending, scheduled, retry, or archived state,
// otherwise DeleteTask will return an error.
//
// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is in active state, it returns a non-nil error.
func (i *Inspector) DeleteTask(qname, id string) error {
if err := base.ValidateQueueName(qname); err != nil {
return fmt.Errorf("asynq: %v", err)
} }
switch prefix { err := i.rdb.DeleteTask(qname, id)
case keyPrefixPending: switch {
return i.rdb.DeletePendingTask(qname, id) case errors.IsQueueNotFound(err):
case keyPrefixScheduled: return fmt.Errorf("asynq: %w", ErrQueueNotFound)
return i.rdb.DeleteScheduledTask(qname, id, score) case errors.IsTaskNotFound(err):
case keyPrefixRetry: return fmt.Errorf("asynq: %w", ErrTaskNotFound)
return i.rdb.DeleteRetryTask(qname, id, score) case err != nil:
case keyPrefixArchived: return fmt.Errorf("asynq: %v", err)
return i.rdb.DeleteArchivedTask(qname, id, score)
default:
return fmt.Errorf("invalid key")
} }
return nil
} }
// RunAllScheduledTasks transitions all scheduled tasks to pending state from the given queue, // RunAllScheduledTasks transitions all scheduled tasks to pending state from the given queue,
@@ -600,27 +552,27 @@ func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
return int(n), err return int(n), err
} }
// RunTaskByKey transitions a task to pending state given task key and queue name. // RunTask updates the task to pending state given a queue name and task id.
func (i *Inspector) RunTaskByKey(qname, key string) error { // The task needs to be in scheduled, retry, or archived state, otherwise RunTask
// will return an error.
//
// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is in pending or active state, it returns a non-nil error.
func (i *Inspector) RunTask(qname, id string) error {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return err return fmt.Errorf("asynq: %v", err)
} }
prefix, id, score, err := parseTaskKey(key) err := i.rdb.RunTask(qname, id)
if err != nil { switch {
return err case errors.IsQueueNotFound(err):
} return fmt.Errorf("asynq: %w", ErrQueueNotFound)
switch prefix { case errors.IsTaskNotFound(err):
case keyPrefixScheduled: return fmt.Errorf("asynq: %w", ErrTaskNotFound)
return i.rdb.RunScheduledTask(qname, id, score) case err != nil:
case keyPrefixRetry: return fmt.Errorf("asynq: %v", err)
return i.rdb.RunRetryTask(qname, id, score)
case keyPrefixArchived:
return i.rdb.RunArchivedTask(qname, id, score)
case keyPrefixPending:
return fmt.Errorf("task is already pending for run")
default:
return fmt.Errorf("invalid key")
} }
return nil
} }
// ArchiveAllPendingTasks archives all pending tasks from the given queue, // ArchiveAllPendingTasks archives all pending tasks from the given queue,
@@ -653,34 +605,34 @@ func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
return int(n), err return int(n), err
} }
// ArchiveTaskByKey archives a task with the given key in the given queue. // ArchiveTask archives a task with the given id in the given queue.
func (i *Inspector) ArchiveTaskByKey(qname, key string) error { // The task needs to be in pending, scheduled, or retry state, otherwise ArchiveTask
// will return an error.
//
// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is already archived, it returns a non-nil error.
func (i *Inspector) ArchiveTask(qname, id string) error {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(qname); err != nil {
return err return fmt.Errorf("asynq: %v", err)
} }
prefix, id, score, err := parseTaskKey(key) err := i.rdb.ArchiveTask(qname, id)
if err != nil { switch {
return err case errors.IsQueueNotFound(err):
} return fmt.Errorf("asynq: %w", ErrQueueNotFound)
switch prefix { case errors.IsTaskNotFound(err):
case keyPrefixPending: return fmt.Errorf("asynq: %w", ErrTaskNotFound)
return i.rdb.ArchivePendingTask(qname, id) case err != nil:
case keyPrefixScheduled: return fmt.Errorf("asynq: %v", err)
return i.rdb.ArchiveScheduledTask(qname, id, score)
case keyPrefixRetry:
return i.rdb.ArchiveRetryTask(qname, id, score)
case keyPrefixArchived:
return fmt.Errorf("task is already archived")
default:
return fmt.Errorf("invalid key")
} }
return nil
} }
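DeleteTask, RunTask, and ArchiveTask all follow the same id-based, sentinel-error pattern, so one lifecycle sketch covers the trio (reusing the placeholder taskID from the GetTaskInfo sketch):

// Park a misbehaving task, re-run it later, then remove it for good.
if err := inspector.ArchiveTask("default", taskID); err != nil {
	log.Printf("archive: %v", err)
}
if err := inspector.RunTask("default", taskID); err != nil && !errors.Is(err, asynq.ErrTaskNotFound) {
	log.Printf("run: %v", err)
}
if err := inspector.DeleteTask("default", taskID); err != nil {
	log.Printf("delete: %v", err)
}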
// CancelActiveTask sends a signal to cancel processing of the task with // CancelProcessing sends a signal to cancel processing of the task
// the given id. CancelActiveTask is best-effort, which means that it does not // given a task id. CancelProcessing is best-effort, which means that it does not
// guarantee that the task with the given id will be canceled. The return // guarantee that the task with the given id will be canceled. The return
// value only indicates whether the cancelation signal has been sent. // value only indicates whether the cancelation signal has been sent.
func (i *Inspector) CancelActiveTask(id string) error { func (i *Inspector) CancelProcessing(id string) error {
return i.rdb.PublishCancelation(id) return i.rdb.PublishCancelation(id)
} }
@@ -732,13 +684,12 @@ func (i *Inspector) Servers() ([]*ServerInfo, error) {
continue continue
} }
wrkInfo := &WorkerInfo{ wrkInfo := &WorkerInfo{
Started: w.Started, TaskID: w.ID,
Deadline: w.Deadline, TaskType: w.Type,
Task: &ActiveTask{ TaskPayload: w.Payload,
Task: asynq.NewTask(w.Type, w.Payload), Queue: w.Queue,
ID: w.ID, Started: w.Started,
Queue: w.Queue, Deadline: w.Deadline,
},
} }
srvInfo.ActiveWorkers = append(srvInfo.ActiveWorkers, wrkInfo) srvInfo.ActiveWorkers = append(srvInfo.ActiveWorkers, wrkInfo)
} }
@@ -775,8 +726,14 @@ type ServerInfo struct {
// WorkerInfo describes a running worker processing a task. // WorkerInfo describes a running worker processing a task.
type WorkerInfo struct { type WorkerInfo struct {
// The task the worker is processing. // ID of the task the worker is processing.
Task *ActiveTask TaskID string
// Type of the task the worker is processing.
TaskType string
// Payload of the task the worker is processing.
TaskPayload []byte
// Queue from which the worker got its task.
Queue string
// Time the worker started processing the task. // Time the worker started processing the task.
Started time.Time Started time.Time
// Time the worker needs to finish processing the task by. // Time the worker needs to finish processing the task by.
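WorkerInfo now carries the task fields inline instead of nesting an ActiveTask; a sketch that walks active workers and best-effort cancels long-running ones, assuming ServerInfo exposes the ActiveWorkers slice populated above (time package required):

servers, err := inspector.Servers()
if err != nil {
	log.Fatal(err)
}
for _, srv := range servers {
	for _, w := range srv.ActiveWorkers {
		fmt.Printf("worker: task=%s type=%s queue=%s started=%v\n",
			w.TaskID, w.TaskType, w.Queue, w.Started)
		if time.Since(w.Started) > 10*time.Minute {
			// CancelProcessing only publishes a cancelation signal;
			// it does not guarantee the task is actually stopped.
			_ = inspector.CancelProcessing(w.TaskID)
		}
	}
}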
@@ -798,14 +755,16 @@ type ClusterNode struct {
} }
// ClusterNodes returns a list of nodes the given queue belongs to. // ClusterNodes returns a list of nodes the given queue belongs to.
func (i *Inspector) ClusterNodes(qname string) ([]ClusterNode, error) { //
// Only relevant if task queues are stored in redis cluster.
func (i *Inspector) ClusterNodes(qname string) ([]*ClusterNode, error) {
nodes, err := i.rdb.ClusterNodes(qname) nodes, err := i.rdb.ClusterNodes(qname)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var res []ClusterNode var res []*ClusterNode
for _, node := range nodes { for _, node := range nodes {
res = append(res, ClusterNode{ID: node.ID, Addr: node.Addr}) res = append(res, &ClusterNode{ID: node.ID, Addr: node.Addr})
} }
return res, nil return res, nil
} }
@@ -819,10 +778,10 @@ type SchedulerEntry struct {
Spec string Spec string
// Periodic Task registered for this entry. // Periodic Task registered for this entry.
Task *asynq.Task Task *Task
// Opts is the options for the periodic task. // Opts is the options for the periodic task.
Opts []asynq.Option Opts []Option
// Next shows the next time the task will be enqueued. // Next shows the next time the task will be enqueued.
Next time.Time Next time.Time
@@ -841,8 +800,8 @@ func (i *Inspector) SchedulerEntries() ([]*SchedulerEntry, error) {
return nil, err return nil, err
} }
for _, e := range res { for _, e := range res {
task := asynq.NewTask(e.Type, e.Payload) task := NewTask(e.Type, e.Payload)
var opts []asynq.Option var opts []Option
for _, s := range e.Opts { for _, s := range e.Opts {
if o, err := parseOption(s); err == nil { if o, err := parseOption(s); err == nil {
// ignore bad data // ignore bad data
@@ -863,7 +822,7 @@ func (i *Inspector) SchedulerEntries() ([]*SchedulerEntry, error) {
// parseOption interprets a string s as an Option and returns the Option if parsing is successful, // parseOption interprets a string s as an Option and returns the Option if parsing is successful,
// otherwise returns non-nil error. // otherwise returns non-nil error.
func parseOption(s string) (asynq.Option, error) { func parseOption(s string) (Option, error) {
fn, arg := parseOptionFunc(s), parseOptionArg(s) fn, arg := parseOptionFunc(s), parseOptionArg(s)
switch fn { switch fn {
case "Queue": case "Queue":
@@ -871,43 +830,49 @@ func parseOption(s string) (asynq.Option, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return asynq.Queue(qname), nil return Queue(qname), nil
case "MaxRetry": case "MaxRetry":
n, err := strconv.Atoi(arg) n, err := strconv.Atoi(arg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return asynq.MaxRetry(n), nil return MaxRetry(n), nil
case "Timeout": case "Timeout":
d, err := time.ParseDuration(arg) d, err := time.ParseDuration(arg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return asynq.Timeout(d), nil return Timeout(d), nil
case "Deadline": case "Deadline":
t, err := time.Parse(time.UnixDate, arg) t, err := time.Parse(time.UnixDate, arg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return asynq.Deadline(t), nil return Deadline(t), nil
case "Unique": case "Unique":
d, err := time.ParseDuration(arg) d, err := time.ParseDuration(arg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return asynq.Unique(d), nil return Unique(d), nil
case "ProcessAt": case "ProcessAt":
t, err := time.Parse(time.UnixDate, arg) t, err := time.Parse(time.UnixDate, arg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return asynq.ProcessAt(t), nil return ProcessAt(t), nil
case "ProcessIn": case "ProcessIn":
d, err := time.ParseDuration(arg) d, err := time.ParseDuration(arg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return asynq.ProcessIn(d), nil return ProcessIn(d), nil
case "Retention":
d, err := time.ParseDuration(arg)
if err != nil {
return nil, err
}
return Retention(d), nil
default: default:
return nil, fmt.Errorf("cannot not parse option string %q", s) return nil, fmt.Errorf("cannot not parse option string %q", s)
} }
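parseOption only has to round-trip the option strings recorded by the scheduler, which is why each case simply rebuilds the matching Option value; on the consuming side the entries might be inspected like this (field names follow the SchedulerEntry struct above):

entries, err := inspector.SchedulerEntries()
if err != nil {
	log.Fatal(err)
}
for _, e := range entries {
	fmt.Printf("spec=%q type=%s opts=%v next=%v\n", e.Spec, e.Task.Type(), e.Opts, e.Next)
}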

File diff suppressed because it is too large

View File

@@ -1,22 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
/*
Package inspeq provides helper types and functions to inspect queues and tasks managed by Asynq.
Inspector is used to query and mutate the state of queues and tasks.
Example:
inspector := inspeq.New(asynq.RedisClientOpt{Addr: "localhost:6379"})
tasks, err := inspector.ListArchivedTasks("my-queue")
for _, t := range tasks {
if err := inspector.DeleteTaskByKey(t.Key()); err != nil {
// handle error
}
}
*/
package inspeq

View File

@@ -6,16 +6,19 @@
package asynqtest package asynqtest
import ( import (
"context"
"encoding/json" "encoding/json"
"math" "math"
"sort" "sort"
"testing" "testing"
"time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts" "github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/timeutil"
) )
// EquateInt64Approx returns a Comparer option that treats int64 values // EquateInt64Approx returns a Comparer option that treats int64 values
@@ -30,7 +33,7 @@ func EquateInt64Approx(margin int64) cmp.Option {
var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage) []*base.TaskMessage { var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage) []*base.TaskMessage {
out := append([]*base.TaskMessage(nil), in...) // Copy input to avoid mutating it out := append([]*base.TaskMessage(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool { sort.Slice(out, func(i, j int) bool {
return out[i].ID.String() < out[j].ID.String() return out[i].ID < out[j].ID
}) })
return out return out
}) })
@@ -39,7 +42,7 @@ var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage
var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []base.Z) []base.Z { var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []base.Z) []base.Z {
out := append([]base.Z(nil), in...) // Copy input to avoid mutating it out := append([]base.Z(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool { sort.Slice(out, func(i, j int) bool {
return out[i].Message.ID.String() < out[j].Message.ID.String() return out[i].Message.ID < out[j].Message.ID
}) })
return out return out
}) })
@@ -94,15 +97,15 @@ var SortStringSliceOpt = cmp.Transformer("SortStringSlice", func(in []string) []
var IgnoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID") var IgnoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID")
// NewTaskMessage returns a new instance of TaskMessage given a task type and payload. // NewTaskMessage returns a new instance of TaskMessage given a task type and payload.
func NewTaskMessage(taskType string, payload map[string]interface{}) *base.TaskMessage { func NewTaskMessage(taskType string, payload []byte) *base.TaskMessage {
return NewTaskMessageWithQueue(taskType, payload, base.DefaultQueueName) return NewTaskMessageWithQueue(taskType, payload, base.DefaultQueueName)
} }
// NewTaskMessageWithQueue returns a new instance of TaskMessage given a // NewTaskMessageWithQueue returns a new instance of TaskMessage given a
// task type, payload and queue name. // task type, payload and queue name.
func NewTaskMessageWithQueue(taskType string, payload map[string]interface{}, qname string) *base.TaskMessage { func NewTaskMessageWithQueue(taskType string, payload []byte, qname string) *base.TaskMessage {
return &base.TaskMessage{ return &base.TaskMessage{
ID: uuid.New(), ID: uuid.NewString(),
Type: taskType, Type: taskType,
Queue: qname, Queue: qname,
Retry: 25, Retry: 25,
@@ -112,17 +115,41 @@ func NewTaskMessageWithQueue(taskType string, payload map[string]interface{}, qn
} }
} }
// NewLeaseWithClock returns a new lease with the given expiration time and clock.
func NewLeaseWithClock(expirationTime time.Time, clock timeutil.Clock) *base.Lease {
l := base.NewLease(expirationTime)
l.Clock = clock
return l
}
// JSON serializes the given key-value pairs into stream of bytes in JSON.
func JSON(kv map[string]interface{}) []byte {
b, err := json.Marshal(kv)
if err != nil {
panic(err)
}
return b
}
// TaskMessageAfterRetry returns an updated copy of t after retry. // TaskMessageAfterRetry returns an updated copy of t after retry.
// It increments retry count and sets the error message. // It increments retry count and sets the error message and last_failed_at time.
func TaskMessageAfterRetry(t base.TaskMessage, errMsg string) *base.TaskMessage { func TaskMessageAfterRetry(t base.TaskMessage, errMsg string, failedAt time.Time) *base.TaskMessage {
t.Retried = t.Retried + 1 t.Retried = t.Retried + 1
t.ErrorMsg = errMsg t.ErrorMsg = errMsg
t.LastFailedAt = failedAt.Unix()
return &t return &t
} }
// TaskMessageWithError returns an updated copy of t with the given error message. // TaskMessageWithError returns an updated copy of t with the given error message.
func TaskMessageWithError(t base.TaskMessage, errMsg string) *base.TaskMessage { func TaskMessageWithError(t base.TaskMessage, errMsg string, failedAt time.Time) *base.TaskMessage {
t.ErrorMsg = errMsg t.ErrorMsg = errMsg
t.LastFailedAt = failedAt.Unix()
return &t
}
// TaskMessageWithCompletedAt returns an updated copy of t after completion.
func TaskMessageWithCompletedAt(t base.TaskMessage, completedAt time.Time) *base.TaskMessage {
t.CompletedAt = completedAt.Unix()
return &t return &t
} }
@@ -130,7 +157,7 @@ func TaskMessageWithError(t base.TaskMessage, errMsg string) *base.TaskMessage {
// Calling test will fail if marshaling errors out. // Calling test will fail if marshaling errors out.
func MustMarshal(tb testing.TB, msg *base.TaskMessage) string { func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
tb.Helper() tb.Helper()
data, err := json.Marshal(msg) data, err := base.EncodeMessage(msg)
if err != nil { if err != nil {
tb.Fatal(err) tb.Fatal(err)
} }
@@ -141,34 +168,11 @@ func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
// Calling test will fail if unmarshaling errors out. // Calling test will fail if unmarshaling errors out.
func MustUnmarshal(tb testing.TB, data string) *base.TaskMessage { func MustUnmarshal(tb testing.TB, data string) *base.TaskMessage {
tb.Helper() tb.Helper()
var msg base.TaskMessage msg, err := base.DecodeMessage([]byte(data))
err := json.Unmarshal([]byte(data), &msg)
if err != nil { if err != nil {
tb.Fatal(err) tb.Fatal(err)
} }
return &msg return msg
}
// MustMarshalSlice marshals a slice of task messages and return a slice of
// json strings. Calling test will fail if marshaling errors out.
func MustMarshalSlice(tb testing.TB, msgs []*base.TaskMessage) []string {
tb.Helper()
var data []string
for _, m := range msgs {
data = append(data, MustMarshal(tb, m))
}
return data
}
// MustUnmarshalSlice unmarshals a slice of strings into a slice of task message structs.
// Calling test will fail if marshaling errors out.
func MustUnmarshalSlice(tb testing.TB, data []string) []*base.TaskMessage {
tb.Helper()
var msgs []*base.TaskMessage
for _, s := range data {
msgs = append(msgs, MustUnmarshal(tb, s))
}
return msgs
} }
// FlushDB deletes all the keys of the currently selected DB. // FlushDB deletes all the keys of the currently selected DB.
@@ -176,12 +180,12 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
tb.Helper() tb.Helper()
switch r := r.(type) { switch r := r.(type) {
case *redis.Client: case *redis.Client:
if err := r.FlushDB().Err(); err != nil { if err := r.FlushDB(context.Background()).Err(); err != nil {
tb.Fatal(err) tb.Fatal(err)
} }
case *redis.ClusterClient: case *redis.ClusterClient:
err := r.ForEachMaster(func(c *redis.Client) error { err := r.ForEachMaster(context.Background(), func(ctx context.Context, c *redis.Client) error {
if err := c.FlushAll().Err(); err != nil { if err := c.FlushAll(ctx).Err(); err != nil {
return err return err
} }
return nil return nil
@@ -195,49 +199,57 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
// SeedPendingQueue initializes the specified queue with the given messages. // SeedPendingQueue initializes the specified queue with the given messages.
func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) { func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisList(tb, r, base.QueueKey(qname), msgs) seedRedisList(tb, r, base.PendingKey(qname), msgs, base.TaskStatePending)
} }
// SeedActiveQueue initializes the active queue with the given messages. // SeedActiveQueue initializes the active queue with the given messages.
func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) { func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisList(tb, r, base.ActiveKey(qname), msgs) seedRedisList(tb, r, base.ActiveKey(qname), msgs, base.TaskStateActive)
} }
// SeedScheduledQueue initializes the scheduled queue with the given messages. // SeedScheduledQueue initializes the scheduled queue with the given messages.
func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) { func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.ScheduledKey(qname), entries) seedRedisZSet(tb, r, base.ScheduledKey(qname), entries, base.TaskStateScheduled)
} }
// SeedRetryQueue initializes the retry queue with the given messages. // SeedRetryQueue initializes the retry queue with the given messages.
func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) { func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.RetryKey(qname), entries) seedRedisZSet(tb, r, base.RetryKey(qname), entries, base.TaskStateRetry)
} }
// SeedArchivedQueue initializes the archived queue with the given messages. // SeedArchivedQueue initializes the archived queue with the given messages.
func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) { func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.ArchivedKey(qname), entries) seedRedisZSet(tb, r, base.ArchivedKey(qname), entries, base.TaskStateArchived)
} }
// SeedDeadlines initializes the deadlines set with the given entries. // SeedLease initializes the lease set with the given entries.
func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) { func SeedLease(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries) seedRedisZSet(tb, r, base.LeaseKey(qname), entries, base.TaskStateActive)
}
// SeedCompletedQueue initializes the completed set with the given entries.
func SeedCompletedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.CompletedKey(qname), entries, base.TaskStateCompleted)
} }
// SeedAllPendingQueues initializes all of the specified queues with the given messages. // SeedAllPendingQueues initializes all of the specified queues with the given messages.
// //
// pending maps a queue name to a list of messages. // pending maps a queue name to a list of messages.
func SeedAllPendingQueues(tb testing.TB, r redis.UniversalClient, pending map[string][]*base.TaskMessage) { func SeedAllPendingQueues(tb testing.TB, r redis.UniversalClient, pending map[string][]*base.TaskMessage) {
tb.Helper()
for q, msgs := range pending { for q, msgs := range pending {
SeedPendingQueue(tb, r, msgs, q) SeedPendingQueue(tb, r, msgs, q)
} }
@@ -245,6 +257,7 @@ func SeedAllPendingQueues(tb testing.TB, r redis.UniversalClient, pending map[st
// SeedAllActiveQueues initializes all of the specified active queues with the given messages. // SeedAllActiveQueues initializes all of the specified active queues with the given messages.
func SeedAllActiveQueues(tb testing.TB, r redis.UniversalClient, active map[string][]*base.TaskMessage) { func SeedAllActiveQueues(tb testing.TB, r redis.UniversalClient, active map[string][]*base.TaskMessage) {
tb.Helper()
for q, msgs := range active { for q, msgs := range active {
SeedActiveQueue(tb, r, msgs, q) SeedActiveQueue(tb, r, msgs, q)
} }
@@ -252,6 +265,7 @@ func SeedAllActiveQueues(tb testing.TB, r redis.UniversalClient, active map[stri
// SeedAllScheduledQueues initializes all of the specified scheduled queues with the given entries. // SeedAllScheduledQueues initializes all of the specified scheduled queues with the given entries.
func SeedAllScheduledQueues(tb testing.TB, r redis.UniversalClient, scheduled map[string][]base.Z) { func SeedAllScheduledQueues(tb testing.TB, r redis.UniversalClient, scheduled map[string][]base.Z) {
tb.Helper()
for q, entries := range scheduled { for q, entries := range scheduled {
SeedScheduledQueue(tb, r, entries, q) SeedScheduledQueue(tb, r, entries, q)
} }
@@ -259,6 +273,7 @@ func SeedAllScheduledQueues(tb testing.TB, r redis.UniversalClient, scheduled ma
// SeedAllRetryQueues initializes all of the specified retry queues with the given entries. // SeedAllRetryQueues initializes all of the specified retry queues with the given entries.
func SeedAllRetryQueues(tb testing.TB, r redis.UniversalClient, retry map[string][]base.Z) { func SeedAllRetryQueues(tb testing.TB, r redis.UniversalClient, retry map[string][]base.Z) {
tb.Helper()
for q, entries := range retry { for q, entries := range retry {
SeedRetryQueue(tb, r, entries, q) SeedRetryQueue(tb, r, entries, q)
} }
@@ -266,108 +281,207 @@ func SeedAllRetryQueues(tb testing.TB, r redis.UniversalClient, retry map[string
// SeedAllArchivedQueues initializes all of the specified archived queues with the given entries. // SeedAllArchivedQueues initializes all of the specified archived queues with the given entries.
func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[string][]base.Z) { func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[string][]base.Z) {
tb.Helper()
for q, entries := range archived { for q, entries := range archived {
SeedArchivedQueue(tb, r, entries, q) SeedArchivedQueue(tb, r, entries, q)
} }
} }
// SeedAllDeadlines initializes all of the deadlines with the given entries. // SeedAllLease initializes all of the lease sets with the given entries.
func SeedAllDeadlines(tb testing.TB, r redis.UniversalClient, deadlines map[string][]base.Z) { func SeedAllLease(tb testing.TB, r redis.UniversalClient, lease map[string][]base.Z) {
for q, entries := range deadlines { tb.Helper()
SeedDeadlines(tb, r, entries, q) for q, entries := range lease {
SeedLease(tb, r, entries, q)
} }
} }
func seedRedisList(tb testing.TB, c redis.UniversalClient, key string, msgs []*base.TaskMessage) { // SeedAllCompletedQueues initializes all of the completed queues with the given entries.
data := MustMarshalSlice(tb, msgs) func SeedAllCompletedQueues(tb testing.TB, r redis.UniversalClient, completed map[string][]base.Z) {
for _, s := range data { tb.Helper()
if err := c.LPush(key, s).Err(); err != nil { for q, entries := range completed {
SeedCompletedQueue(tb, r, entries, q)
}
}
func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
msgs []*base.TaskMessage, state base.TaskState) {
tb.Helper()
for _, msg := range msgs {
encoded := MustMarshal(tb, msg)
if err := c.LPush(context.Background(), key, msg.ID).Err(); err != nil {
tb.Fatal(err) tb.Fatal(err)
} }
key := base.TaskKey(msg.Queue, msg.ID)
data := map[string]interface{}{
"msg": encoded,
"state": state.String(),
"unique_key": msg.UniqueKey,
}
if err := c.HSet(context.Background(), key, data).Err(); err != nil {
tb.Fatal(err)
}
if len(msg.UniqueKey) > 0 {
err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID, 1*time.Minute).Err()
if err != nil {
tb.Fatalf("Failed to set unique lock in redis: %v", err)
}
}
} }
} }
func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string, items []base.Z) { func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
items []base.Z, state base.TaskState) {
tb.Helper()
for _, item := range items { for _, item := range items {
z := &redis.Z{Member: MustMarshal(tb, item.Message), Score: float64(item.Score)} msg := item.Message
if err := c.ZAdd(key, z).Err(); err != nil { encoded := MustMarshal(tb, msg)
z := &redis.Z{Member: msg.ID, Score: float64(item.Score)}
if err := c.ZAdd(context.Background(), key, z).Err(); err != nil {
tb.Fatal(err) tb.Fatal(err)
} }
key := base.TaskKey(msg.Queue, msg.ID)
data := map[string]interface{}{
"msg": encoded,
"state": state.String(),
"unique_key": msg.UniqueKey,
}
if err := c.HSet(context.Background(), key, data).Err(); err != nil {
tb.Fatal(err)
}
if len(msg.UniqueKey) > 0 {
err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID, 1*time.Minute).Err()
if err != nil {
tb.Fatalf("Failed to set unique lock in redis: %v", err)
}
}
} }
} }
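The seed helpers now write the per-task hash (msg, state, unique_key) in addition to the list or zset entry, mirroring what rdb does at runtime; a sketch of a test inside this module that seeds and reads back a pending task (the Redis address and DB number are placeholders, and the internal packages are only importable from within the asynq module):

func TestSeedPendingQueueSketch(t *testing.T) {
	r := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 14})
	asynqtest.FlushDB(t, r)

	msg := asynqtest.NewTaskMessageWithQueue("image:resize",
		asynqtest.JSON(map[string]interface{}{"src": "img.jpg"}), "default")
	asynqtest.SeedPendingQueue(t, r, []*base.TaskMessage{msg}, "default")

	got := asynqtest.GetPendingMessages(t, r, "default")
	if len(got) != 1 || got[0].ID != msg.ID {
		t.Errorf("unexpected pending messages: %v", got)
	}
}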
// GetPendingMessages returns all pending messages in the given queue. // GetPendingMessages returns all pending messages in the given queue.
// It also asserts the state field of the task.
func GetPendingMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage { func GetPendingMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
tb.Helper() tb.Helper()
return getListMessages(tb, r, base.QueueKey(qname)) return getMessagesFromList(tb, r, qname, base.PendingKey, base.TaskStatePending)
} }
// GetActiveMessages returns all active messages in the given queue. // GetActiveMessages returns all active messages in the given queue.
// It also asserts the state field of the task.
func GetActiveMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage { func GetActiveMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
tb.Helper() tb.Helper()
return getListMessages(tb, r, base.ActiveKey(qname)) return getMessagesFromList(tb, r, qname, base.ActiveKey, base.TaskStateActive)
} }
// GetScheduledMessages returns all scheduled task messages in the given queue. // GetScheduledMessages returns all scheduled task messages in the given queue.
// It also asserts the state field of the task.
func GetScheduledMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage { func GetScheduledMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
tb.Helper() tb.Helper()
return getZSetMessages(tb, r, base.ScheduledKey(qname)) return getMessagesFromZSet(tb, r, qname, base.ScheduledKey, base.TaskStateScheduled)
} }
// GetRetryMessages returns all retry messages in the given queue. // GetRetryMessages returns all retry messages in the given queue.
// It also asserts the state field of the task.
func GetRetryMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage { func GetRetryMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
tb.Helper() tb.Helper()
return getZSetMessages(tb, r, base.RetryKey(qname)) return getMessagesFromZSet(tb, r, qname, base.RetryKey, base.TaskStateRetry)
} }
// GetArchivedMessages returns all archived messages in the given queue. // GetArchivedMessages returns all archived messages in the given queue.
// It also asserts the state field of the task.
func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage { func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
tb.Helper() tb.Helper()
return getZSetMessages(tb, r, base.ArchivedKey(qname)) return getMessagesFromZSet(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
}
// GetCompletedMessages returns all completed task messages in the given queue.
// It also asserts the state field of the task.
func GetCompletedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
tb.Helper()
return getMessagesFromZSet(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
} }
// GetScheduledEntries returns all scheduled messages and their scores in the given queue. // GetScheduledEntries returns all scheduled messages and their scores in the given queue.
// It also asserts the state field of the task.
func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z { func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
tb.Helper() tb.Helper()
return getZSetEntries(tb, r, base.ScheduledKey(qname)) return getMessagesFromZSetWithScores(tb, r, qname, base.ScheduledKey, base.TaskStateScheduled)
} }
// GetRetryEntries returns all retry messages and their scores in the given queue. // GetRetryEntries returns all retry messages and their scores in the given queue.
// It also asserts the state field of the task.
func GetRetryEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z { func GetRetryEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
tb.Helper() tb.Helper()
return getZSetEntries(tb, r, base.RetryKey(qname)) return getMessagesFromZSetWithScores(tb, r, qname, base.RetryKey, base.TaskStateRetry)
} }
// GetArchivedEntries returns all archived messages and their scores in the given queue. // GetArchivedEntries returns all archived messages and their scores in the given queue.
// It also asserts the state field of the task.
func GetArchivedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z { func GetArchivedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
tb.Helper() tb.Helper()
return getZSetEntries(tb, r, base.ArchivedKey(qname)) return getMessagesFromZSetWithScores(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
} }
// GetDeadlinesEntries returns all task messages and their scores in the deadlines set for the given queue. // GetLeaseEntries returns all task IDs and their scores in the lease set for the given queue.
func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z { // It also asserts the state field of the task.
func GetLeaseEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
tb.Helper() tb.Helper()
return getZSetEntries(tb, r, base.DeadlinesKey(qname)) return getMessagesFromZSetWithScores(tb, r, qname, base.LeaseKey, base.TaskStateActive)
} }
func getListMessages(tb testing.TB, r redis.UniversalClient, list string) []*base.TaskMessage { // GetCompletedEntries returns all completed messages and their scores in the given queue.
data := r.LRange(list, 0, -1).Val() // It also asserts the state field of the task.
return MustUnmarshalSlice(tb, data) func GetCompletedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
tb.Helper()
return getMessagesFromZSetWithScores(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
} }
func getZSetMessages(tb testing.TB, r redis.UniversalClient, zset string) []*base.TaskMessage { // Retrieves all messages stored under `keyFn(qname)` key in redis list.
data := r.ZRange(zset, 0, -1).Val() func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
return MustUnmarshalSlice(tb, data) keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
} tb.Helper()
ids := r.LRange(context.Background(), keyFn(qname), 0, -1).Val()
func getZSetEntries(tb testing.TB, r redis.UniversalClient, zset string) []base.Z { var msgs []*base.TaskMessage
data := r.ZRangeWithScores(zset, 0, -1).Val() for _, id := range ids {
var entries []base.Z taskKey := base.TaskKey(qname, id)
for _, z := range data { data := r.HGet(context.Background(), taskKey, "msg").Val()
entries = append(entries, base.Z{ msgs = append(msgs, MustUnmarshal(tb, data))
Message: MustUnmarshal(tb, z.Member.(string)), if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
Score: int64(z.Score), tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
}) }
} }
return entries return msgs
}
// Retrieves all messages stored under `keyFn(qname)` key in redis zset (sorted-set).
func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
tb.Helper()
ids := r.ZRange(context.Background(), keyFn(qname), 0, -1).Val()
var msgs []*base.TaskMessage
for _, id := range ids {
taskKey := base.TaskKey(qname, id)
msg := r.HGet(context.Background(), taskKey, "msg").Val()
msgs = append(msgs, MustUnmarshal(tb, msg))
if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
}
}
return msgs
}
// Retrieves all messages along with their scores stored under `keyFn(qname)` key in redis zset (sorted-set).
func getMessagesFromZSetWithScores(tb testing.TB, r redis.UniversalClient,
qname string, keyFn func(qname string) string, state base.TaskState) []base.Z {
tb.Helper()
zs := r.ZRangeWithScores(context.Background(), keyFn(qname), 0, -1).Val()
var res []base.Z
for _, z := range zs {
taskID := z.Member.(string)
taskKey := base.TaskKey(qname, taskID)
msg := r.HGet(context.Background(), taskKey, "msg").Val()
res = append(res, base.Z{Message: MustUnmarshal(tb, msg), Score: int64(z.Score)})
if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
tb.Errorf("task (id=%q) is in %q state, want %v", taskID, gotState, state)
}
}
return res
} }

View File

@@ -7,25 +7,29 @@ package base
import ( import (
"context" "context"
"encoding/json" "crypto/md5"
"encoding/hex"
"fmt" "fmt"
"sort"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/google/uuid" "github.com/golang/protobuf/ptypes"
"github.com/hibiken/asynq/internal/errors"
pb "github.com/hibiken/asynq/internal/proto"
"github.com/hibiken/asynq/internal/timeutil"
"google.golang.org/protobuf/proto"
) )
// Version of asynq library and CLI. // Version of asynq library and CLI.
const Version = "0.16.0" const Version = "0.22.1"
// DefaultQueueName is the queue name used if none are specified by user. // DefaultQueueName is the queue name used if none are specified by user.
const DefaultQueueName = "default" const DefaultQueueName = "default"
// DefaultQueue is the redis key for the default queue. // DefaultQueue is the redis key for the default queue.
var DefaultQueue = QueueKey(DefaultQueueName) var DefaultQueue = PendingKey(DefaultQueueName)
// Global Redis keys. // Global Redis keys.
const ( const (
@@ -36,58 +40,135 @@ const (
CancelChannel = "asynq:cancel" // PubSub channel CancelChannel = "asynq:cancel" // PubSub channel
) )
// TaskState denotes the state of a task.
type TaskState int
const (
TaskStateActive TaskState = iota + 1
TaskStatePending
TaskStateScheduled
TaskStateRetry
TaskStateArchived
TaskStateCompleted
)
func (s TaskState) String() string {
switch s {
case TaskStateActive:
return "active"
case TaskStatePending:
return "pending"
case TaskStateScheduled:
return "scheduled"
case TaskStateRetry:
return "retry"
case TaskStateArchived:
return "archived"
case TaskStateCompleted:
return "completed"
}
panic(fmt.Sprintf("internal error: unknown task state %d", s))
}
func TaskStateFromString(s string) (TaskState, error) {
switch s {
case "active":
return TaskStateActive, nil
case "pending":
return TaskStatePending, nil
case "scheduled":
return TaskStateScheduled, nil
case "retry":
return TaskStateRetry, nil
case "archived":
return TaskStateArchived, nil
case "completed":
return TaskStateCompleted, nil
}
return 0, errors.E(errors.FailedPrecondition, fmt.Sprintf("%q is not a supported task state", s))
}
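TaskState is persisted as its lowercase string form in the task hash, so String and TaskStateFromString have to stay inverses of each other; a tiny round-trip sketch:

// Round trip: numeric state -> stored string -> numeric state.
s := base.TaskStateScheduled.String() // "scheduled"
if got, err := base.TaskStateFromString(s); err != nil || got != base.TaskStateScheduled {
	panic("task state round-trip mismatch")
}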
// ValidateQueueName validates a given qname to be used as a queue name. // ValidateQueueName validates a given qname to be used as a queue name.
// Returns nil if valid, otherwise returns non-nil error. // Returns nil if valid, otherwise returns non-nil error.
func ValidateQueueName(qname string) error { func ValidateQueueName(qname string) error {
if len(qname) == 0 { if len(strings.TrimSpace(qname)) == 0 {
return fmt.Errorf("queue name must contain one or more characters") return fmt.Errorf("queue name must contain one or more characters")
} }
return nil return nil
} }
// QueueKey returns a redis key for the given queue name. // QueueKeyPrefix returns a prefix for all keys in the given queue.
func QueueKey(qname string) string { func QueueKeyPrefix(qname string) string {
return fmt.Sprintf("asynq:{%s}", qname) return fmt.Sprintf("asynq:{%s}:", qname)
}
// TaskKeyPrefix returns a prefix for task key.
func TaskKeyPrefix(qname string) string {
return fmt.Sprintf("%st:", QueueKeyPrefix(qname))
}
// TaskKey returns a redis key for the given task message.
func TaskKey(qname, id string) string {
return fmt.Sprintf("%s%s", TaskKeyPrefix(qname), id)
}
// PendingKey returns a redis key for the given queue name.
func PendingKey(qname string) string {
return fmt.Sprintf("%spending", QueueKeyPrefix(qname))
} }
// ActiveKey returns a redis key for the active tasks. // ActiveKey returns a redis key for the active tasks.
func ActiveKey(qname string) string { func ActiveKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:active", qname) return fmt.Sprintf("%sactive", QueueKeyPrefix(qname))
} }
// ScheduledKey returns a redis key for the scheduled tasks. // ScheduledKey returns a redis key for the scheduled tasks.
func ScheduledKey(qname string) string { func ScheduledKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:scheduled", qname) return fmt.Sprintf("%sscheduled", QueueKeyPrefix(qname))
} }
// RetryKey returns a redis key for the retry tasks. // RetryKey returns a redis key for the retry tasks.
func RetryKey(qname string) string { func RetryKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:retry", qname) return fmt.Sprintf("%sretry", QueueKeyPrefix(qname))
} }
// ArchivedKey returns a redis key for the archived tasks. // ArchivedKey returns a redis key for the archived tasks.
func ArchivedKey(qname string) string { func ArchivedKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:archived", qname) return fmt.Sprintf("%sarchived", QueueKeyPrefix(qname))
} }
// DeadlinesKey returns a redis key for the deadlines. // LeaseKey returns a redis key for the lease.
func DeadlinesKey(qname string) string { func LeaseKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:deadlines", qname) return fmt.Sprintf("%slease", QueueKeyPrefix(qname))
}
func CompletedKey(qname string) string {
return fmt.Sprintf("%scompleted", QueueKeyPrefix(qname))
} }
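To make the new key layout concrete, a small sketch (again assuming compilation inside the module) printing the keys produced by the helpers above for the default queue:

package main

import (
	"fmt"

	"github.com/hibiken/asynq/internal/base"
)

func main() {
	// Every per-queue key now shares the "asynq:{<qname>}:" prefix.
	fmt.Println(base.QueueKeyPrefix("default"))  // asynq:{default}:
	fmt.Println(base.TaskKey("default", "1234")) // asynq:{default}:t:1234
	fmt.Println(base.PendingKey("default"))      // asynq:{default}:pending
	fmt.Println(base.LeaseKey("default"))        // asynq:{default}:lease
	fmt.Println(base.CompletedKey("default"))    // asynq:{default}:completed
}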
// PausedKey returns a redis key to indicate that the given queue is paused. // PausedKey returns a redis key to indicate that the given queue is paused.
func PausedKey(qname string) string { func PausedKey(qname string) string {
return fmt.Sprintf("asynq:{%s}:paused", qname) return fmt.Sprintf("%spaused", QueueKeyPrefix(qname))
}
// ProcessedTotalKey returns a redis key for total processed count for the given queue.
func ProcessedTotalKey(qname string) string {
return fmt.Sprintf("%sprocessed", QueueKeyPrefix(qname))
}
// FailedTotalKey returns a redis key for total failure count for the given queue.
func FailedTotalKey(qname string) string {
return fmt.Sprintf("%sfailed", QueueKeyPrefix(qname))
} }
// ProcessedKey returns a redis key for processed count for the given day for the queue. // ProcessedKey returns a redis key for processed count for the given day for the queue.
func ProcessedKey(qname string, t time.Time) string { func ProcessedKey(qname string, t time.Time) string {
return fmt.Sprintf("asynq:{%s}:processed:%s", qname, t.UTC().Format("2006-01-02")) return fmt.Sprintf("%sprocessed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
} }
// FailedKey returns a redis key for failure count for the given day for the queue. // FailedKey returns a redis key for failure count for the given day for the queue.
func FailedKey(qname string, t time.Time) string { func FailedKey(qname string, t time.Time) string {
return fmt.Sprintf("asynq:{%s}:failed:%s", qname, t.UTC().Format("2006-01-02")) return fmt.Sprintf("%sfailed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
} }
// ServerInfoKey returns a redis key for process info. // ServerInfoKey returns a redis key for process info.
@@ -111,32 +192,12 @@ func SchedulerHistoryKey(entryID string) string {
} }
// UniqueKey returns a redis key with the given type, payload, and queue name. // UniqueKey returns a redis key with the given type, payload, and queue name.
func UniqueKey(qname, tasktype string, payload map[string]interface{}) string { func UniqueKey(qname, tasktype string, payload []byte) string {
return fmt.Sprintf("asynq:{%s}:unique:%s:%s", qname, tasktype, serializePayload(payload))
}
func serializePayload(payload map[string]interface{}) string {
if payload == nil { if payload == nil {
return "nil" return fmt.Sprintf("%sunique:%s:", QueueKeyPrefix(qname), tasktype)
} }
type entry struct { checksum := md5.Sum(payload)
k string return fmt.Sprintf("%sunique:%s:%s", QueueKeyPrefix(qname), tasktype, hex.EncodeToString(checksum[:]))
v interface{}
}
var es []entry
for k, v := range payload {
es = append(es, entry{k, v})
}
// sort entries by key
sort.Slice(es, func(i, j int) bool { return es[i].k < es[j].k })
var b strings.Builder
for _, e := range es {
if b.Len() > 0 {
b.WriteString(",")
}
b.WriteString(fmt.Sprintf("%s=%v", e.k, e.v))
}
return b.String()
} }
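The uniqueness key is now derived from an MD5 checksum of the raw payload bytes instead of a hand-rolled serialization of a map; a short sketch of the resulting key shape:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"

	"github.com/hibiken/asynq/internal/base"
)

func main() {
	payload := []byte(`{"user_id": 42}`)
	key := base.UniqueKey("default", "email:send", payload)

	sum := md5.Sum(payload)
	want := "asynq:{default}:unique:email:send:" + hex.EncodeToString(sum[:])
	fmt.Println(key == want) // true

	// A nil payload produces a key with an empty checksum segment.
	fmt.Println(base.UniqueKey("default", "reindex", nil)) // asynq:{default}:unique:reindex:
}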
// TaskMessage is the internal representation of a task with additional metadata fields. // TaskMessage is the internal representation of a task with additional metadata fields.
@@ -146,10 +207,10 @@ type TaskMessage struct {
Type string Type string
// Payload holds data needed to process the task. // Payload holds data needed to process the task.
Payload map[string]interface{} Payload []byte
// ID is a unique identifier for each task. // ID is a unique identifier for each task.
ID uuid.UUID ID string
// Queue is a name this message should be enqueued to. // Queue is a name this message should be enqueued to.
Queue string Queue string
@@ -163,6 +224,12 @@ type TaskMessage struct {
// ErrorMsg holds the error message from the last failure. // ErrorMsg holds the error message from the last failure.
ErrorMsg string ErrorMsg string
// Time of last failure in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
//
// Use zero to indicate no last failure
LastFailedAt int64
// Timeout specifies timeout in seconds. // Timeout specifies timeout in seconds.
// If task processing doesn't complete within the timeout, the task will be retried // If task processing doesn't complete within the timeout, the task will be retried
// if retry count is remaining. Otherwise it will be moved to the archive. // if retry count is remaining. Otherwise it will be moved to the archive.
@@ -182,26 +249,68 @@ type TaskMessage struct {
// //
// Empty string indicates that no uniqueness lock was used. // Empty string indicates that no uniqueness lock was used.
UniqueKey string UniqueKey string
// Retention specifies the number of seconds the task should be retained after completion.
Retention int64
// CompletedAt is the time the task was processed successfully in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
//
// Use zero to indicate no value.
CompletedAt int64
} }
// EncodeMessage marshals the given task message in JSON and returns an encoded string. // EncodeMessage marshals the given task message and returns an encoded bytes.
func EncodeMessage(msg *TaskMessage) (string, error) { func EncodeMessage(msg *TaskMessage) ([]byte, error) {
b, err := json.Marshal(msg) if msg == nil {
if err != nil { return nil, fmt.Errorf("cannot encode nil message")
return "", err
} }
return string(b), nil return proto.Marshal(&pb.TaskMessage{
Type: msg.Type,
Payload: msg.Payload,
Id: msg.ID,
Queue: msg.Queue,
Retry: int32(msg.Retry),
Retried: int32(msg.Retried),
ErrorMsg: msg.ErrorMsg,
LastFailedAt: msg.LastFailedAt,
Timeout: msg.Timeout,
Deadline: msg.Deadline,
UniqueKey: msg.UniqueKey,
Retention: msg.Retention,
CompletedAt: msg.CompletedAt,
})
} }
// DecodeMessage unmarshals the given encoded string and returns a decoded task message. // DecodeMessage unmarshals the given bytes and returns a decoded task message.
func DecodeMessage(s string) (*TaskMessage, error) { func DecodeMessage(data []byte) (*TaskMessage, error) {
d := json.NewDecoder(strings.NewReader(s)) var pbmsg pb.TaskMessage
d.UseNumber() if err := proto.Unmarshal(data, &pbmsg); err != nil {
var msg TaskMessage
if err := d.Decode(&msg); err != nil {
return nil, err return nil, err
} }
return &msg, nil return &TaskMessage{
Type: pbmsg.GetType(),
Payload: pbmsg.GetPayload(),
ID: pbmsg.GetId(),
Queue: pbmsg.GetQueue(),
Retry: int(pbmsg.GetRetry()),
Retried: int(pbmsg.GetRetried()),
ErrorMsg: pbmsg.GetErrorMsg(),
LastFailedAt: pbmsg.GetLastFailedAt(),
Timeout: pbmsg.GetTimeout(),
Deadline: pbmsg.GetDeadline(),
UniqueKey: pbmsg.GetUniqueKey(),
Retention: pbmsg.GetRetention(),
CompletedAt: pbmsg.GetCompletedAt(),
}, nil
}
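Task messages are now serialized with protocol buffers rather than JSON, and IDs are plain strings (typically UUIDs). A round-trip sketch, compiled inside the module:

package main

import (
	"fmt"

	"github.com/google/uuid"
	"github.com/hibiken/asynq/internal/base"
)

func main() {
	msg := &base.TaskMessage{
		Type:    "email:send",
		Payload: []byte(`{"user_id": 42}`),
		ID:      uuid.NewString(),
		Queue:   "default",
		Retry:   25,
		Timeout: 1800,
	}
	data, err := base.EncodeMessage(msg) // protobuf-encoded bytes stored in the task hash's "msg" field
	if err != nil {
		panic(err)
	}
	decoded, err := base.DecodeMessage(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.ID == msg.ID, decoded.Type) // true email:send
}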
// TaskInfo describes a task message and its metadata.
type TaskInfo struct {
Message *TaskMessage
State TaskState
NextProcessAt time.Time
Result []byte
} }
// Z represents sorted set member. // Z represents sorted set member.
@@ -210,65 +319,6 @@ type Z struct {
Score int64 Score int64
} }
// ServerStatus represents status of a server.
// ServerStatus methods are concurrency safe.
type ServerStatus struct {
mu sync.Mutex
val ServerStatusValue
}
// NewServerStatus returns a new status instance given an initial value.
func NewServerStatus(v ServerStatusValue) *ServerStatus {
return &ServerStatus{val: v}
}
type ServerStatusValue int
const (
// StatusIdle indicates the server is in idle state.
StatusIdle ServerStatusValue = iota
// StatusRunning indicates the server is up and active.
StatusRunning
// StatusQuiet indicates the server is up but not active.
StatusQuiet
// StatusStopped indicates the server has been stopped.
StatusStopped
)
var statuses = []string{
"idle",
"running",
"quiet",
"stopped",
}
func (s *ServerStatus) String() string {
s.mu.Lock()
defer s.mu.Unlock()
if StatusIdle <= s.val && s.val <= StatusStopped {
return statuses[s.val]
}
return "unknown status"
}
// Get returns the status value.
func (s *ServerStatus) Get() ServerStatusValue {
s.mu.Lock()
v := s.val
s.mu.Unlock()
return v
}
// Set sets the status value.
func (s *ServerStatus) Set(v ServerStatusValue) {
s.mu.Lock()
s.val = v
s.mu.Unlock()
}
// ServerInfo holds information about a running server. // ServerInfo holds information about a running server.
type ServerInfo struct { type ServerInfo struct {
Host string Host string
@@ -282,6 +332,59 @@ type ServerInfo struct {
ActiveWorkerCount int ActiveWorkerCount int
} }
// EncodeServerInfo marshals the given ServerInfo and returns the encoded bytes.
func EncodeServerInfo(info *ServerInfo) ([]byte, error) {
if info == nil {
return nil, fmt.Errorf("cannot encode nil server info")
}
queues := make(map[string]int32)
for q, p := range info.Queues {
queues[q] = int32(p)
}
started, err := ptypes.TimestampProto(info.Started)
if err != nil {
return nil, err
}
return proto.Marshal(&pb.ServerInfo{
Host: info.Host,
Pid: int32(info.PID),
ServerId: info.ServerID,
Concurrency: int32(info.Concurrency),
Queues: queues,
StrictPriority: info.StrictPriority,
Status: info.Status,
StartTime: started,
ActiveWorkerCount: int32(info.ActiveWorkerCount),
})
}
// DecodeServerInfo decodes the given bytes into ServerInfo.
func DecodeServerInfo(b []byte) (*ServerInfo, error) {
var pbmsg pb.ServerInfo
if err := proto.Unmarshal(b, &pbmsg); err != nil {
return nil, err
}
queues := make(map[string]int)
for q, p := range pbmsg.GetQueues() {
queues[q] = int(p)
}
startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
if err != nil {
return nil, err
}
return &ServerInfo{
Host: pbmsg.GetHost(),
PID: int(pbmsg.GetPid()),
ServerID: pbmsg.GetServerId(),
Concurrency: int(pbmsg.GetConcurrency()),
Queues: queues,
StrictPriority: pbmsg.GetStrictPriority(),
Status: pbmsg.GetStatus(),
Started: startTime,
ActiveWorkerCount: int(pbmsg.GetActiveWorkerCount()),
}, nil
}
// WorkerInfo holds information about a running worker. // WorkerInfo holds information about a running worker.
type WorkerInfo struct { type WorkerInfo struct {
Host string Host string
@@ -289,12 +392,65 @@ type WorkerInfo struct {
ServerID string ServerID string
ID string ID string
Type string Type string
Payload []byte
Queue string Queue string
Payload map[string]interface{}
Started time.Time Started time.Time
Deadline time.Time Deadline time.Time
} }
// EncodeWorkerInfo marshals the given WorkerInfo and returns the encoded bytes.
func EncodeWorkerInfo(info *WorkerInfo) ([]byte, error) {
if info == nil {
return nil, fmt.Errorf("cannot encode nil worker info")
}
startTime, err := ptypes.TimestampProto(info.Started)
if err != nil {
return nil, err
}
deadline, err := ptypes.TimestampProto(info.Deadline)
if err != nil {
return nil, err
}
return proto.Marshal(&pb.WorkerInfo{
Host: info.Host,
Pid: int32(info.PID),
ServerId: info.ServerID,
TaskId: info.ID,
TaskType: info.Type,
TaskPayload: info.Payload,
Queue: info.Queue,
StartTime: startTime,
Deadline: deadline,
})
}
// DecodeWorkerInfo decodes the given bytes into WorkerInfo.
func DecodeWorkerInfo(b []byte) (*WorkerInfo, error) {
var pbmsg pb.WorkerInfo
if err := proto.Unmarshal(b, &pbmsg); err != nil {
return nil, err
}
startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
if err != nil {
return nil, err
}
deadline, err := ptypes.Timestamp(pbmsg.GetDeadline())
if err != nil {
return nil, err
}
return &WorkerInfo{
Host: pbmsg.GetHost(),
PID: int(pbmsg.GetPid()),
ServerID: pbmsg.GetServerId(),
ID: pbmsg.GetTaskId(),
Type: pbmsg.GetTaskType(),
Payload: pbmsg.GetTaskPayload(),
Queue: pbmsg.GetQueue(),
Started: startTime,
Deadline: deadline,
}, nil
}
// SchedulerEntry holds information about a periodic task registered with a scheduler. // SchedulerEntry holds information about a periodic task registered with a scheduler.
type SchedulerEntry struct { type SchedulerEntry struct {
// Identifier of this entry. // Identifier of this entry.
@@ -307,7 +463,7 @@ type SchedulerEntry struct {
Type string Type string
// Payload is the payload of the periodic task. // Payload is the payload of the periodic task.
Payload map[string]interface{} Payload []byte
// Opts is the options for the periodic task. // Opts is the options for the periodic task.
Opts []string Opts []string
@@ -320,6 +476,55 @@ type SchedulerEntry struct {
Prev time.Time Prev time.Time
} }
// EncodeSchedulerEntry marshals the given entry and returns an encoded bytes.
func EncodeSchedulerEntry(entry *SchedulerEntry) ([]byte, error) {
if entry == nil {
return nil, fmt.Errorf("cannot encode nil scheduler entry")
}
next, err := ptypes.TimestampProto(entry.Next)
if err != nil {
return nil, err
}
prev, err := ptypes.TimestampProto(entry.Prev)
if err != nil {
return nil, err
}
return proto.Marshal(&pb.SchedulerEntry{
Id: entry.ID,
Spec: entry.Spec,
TaskType: entry.Type,
TaskPayload: entry.Payload,
EnqueueOptions: entry.Opts,
NextEnqueueTime: next,
PrevEnqueueTime: prev,
})
}
// DecodeSchedulerEntry unmarshals the given bytes and returns a decoded SchedulerEntry.
func DecodeSchedulerEntry(b []byte) (*SchedulerEntry, error) {
var pbmsg pb.SchedulerEntry
if err := proto.Unmarshal(b, &pbmsg); err != nil {
return nil, err
}
next, err := ptypes.Timestamp(pbmsg.GetNextEnqueueTime())
if err != nil {
return nil, err
}
prev, err := ptypes.Timestamp(pbmsg.GetPrevEnqueueTime())
if err != nil {
return nil, err
}
return &SchedulerEntry{
ID: pbmsg.GetId(),
Spec: pbmsg.GetSpec(),
Type: pbmsg.GetTaskType(),
Payload: pbmsg.GetTaskPayload(),
Opts: pbmsg.GetEnqueueOptions(),
Next: next,
Prev: prev,
}, nil
}
// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler. // SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
type SchedulerEnqueueEvent struct { type SchedulerEnqueueEvent struct {
// ID of the task that was enqueued. // ID of the task that was enqueued.
@@ -329,6 +534,39 @@ type SchedulerEnqueueEvent struct {
EnqueuedAt time.Time EnqueuedAt time.Time
} }
// EncodeSchedulerEnqueueEvent marshals the given event
// and returns an encoded bytes.
func EncodeSchedulerEnqueueEvent(event *SchedulerEnqueueEvent) ([]byte, error) {
if event == nil {
return nil, fmt.Errorf("cannot encode nil enqueue event")
}
enqueuedAt, err := ptypes.TimestampProto(event.EnqueuedAt)
if err != nil {
return nil, err
}
return proto.Marshal(&pb.SchedulerEnqueueEvent{
TaskId: event.TaskID,
EnqueueTime: enqueuedAt,
})
}
// DecodeSchedulerEnqueueEvent unmarshals the given bytes
// and returns a decoded SchedulerEnqueueEvent.
func DecodeSchedulerEnqueueEvent(b []byte) (*SchedulerEnqueueEvent, error) {
var pbmsg pb.SchedulerEnqueueEvent
if err := proto.Unmarshal(b, &pbmsg); err != nil {
return nil, err
}
enqueuedAt, err := ptypes.Timestamp(pbmsg.GetEnqueueTime())
if err != nil {
return nil, err
}
return &SchedulerEnqueueEvent{
TaskID: pbmsg.GetTaskId(),
EnqueuedAt: enqueuedAt,
}, nil
}
// Cancelations is a collection that holds cancel functions for all active tasks. // Cancelations is a collection that holds cancel functions for all active tasks.
// //
// Cancelations are safe for concurrent use by multiple goroutines.
@@ -366,25 +604,96 @@ func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
return fn, ok return fn, ok
} }
// Lease is a time-bound lease for a worker to process a task.
// It provides a communication channel between lessor and lessee about lease expiration.
type Lease struct {
once sync.Once
ch chan struct{}
Clock timeutil.Clock
mu sync.Mutex
expireAt time.Time // guarded by mu
}
func NewLease(expirationTime time.Time) *Lease {
return &Lease{
ch: make(chan struct{}),
expireAt: expirationTime,
Clock: timeutil.NewRealClock(),
}
}
// Reset changes the lease to expire at the given time.
// It returns true if the lease is still valid and the reset was successful, or false if the lease has already expired.
func (l *Lease) Reset(expirationTime time.Time) bool {
if !l.IsValid() {
return false
}
l.mu.Lock()
defer l.mu.Unlock()
l.expireAt = expirationTime
return true
}
// NotifyExpiration sends a notification to the lessee that the lease has expired.
// It returns true if the notification was sent, or false if the lease is still valid and no notification was sent.
func (l *Lease) NotifyExpiration() bool {
if l.IsValid() {
return false
}
l.once.Do(l.closeCh)
return true
}
func (l *Lease) closeCh() {
close(l.ch)
}
// Done returns a channel that the lessee can receive from to be notified when the lessor reports that the lease has expired.
func (l *Lease) Done() <-chan struct{} {
return l.ch
}
// Deadline returns the expiration time of the lease.
func (l *Lease) Deadline() time.Time {
l.mu.Lock()
defer l.mu.Unlock()
return l.expireAt
}
// IsValid returns true if the lease's expiration time is in the future or equal to the current time,
// and false otherwise.
func (l *Lease) IsValid() bool {
now := l.Clock.Now()
l.mu.Lock()
defer l.mu.Unlock()
return l.expireAt.After(now) || l.expireAt.Equal(now)
}
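A sketch of the intended lessor/lessee interaction: the heartbeat (lessor) keeps pushing the expiration forward or, failing that, notifies the lessee, while the worker goroutine (lessee) watches Done() to abandon work once the lease is gone. Durations here are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/hibiken/asynq/internal/base"
)

func main() {
	lease := base.NewLease(time.Now().Add(30 * time.Second))

	// Lessee: stop processing if the lease expires before the task finishes.
	done := make(chan struct{})
	go func() {
		defer close(done)
		select {
		case <-lease.Done():
			fmt.Println("lease expired; abandoning the task")
		case <-time.After(100 * time.Millisecond): // simulated work
			fmt.Println("task finished within the lease")
		}
	}()

	// Lessor: extend the lease while the worker is active; if the lease can no
	// longer be extended, tell the lessee to give up.
	if !lease.Reset(time.Now().Add(60 * time.Second)) {
		lease.NotifyExpiration()
	}
	<-done
}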
// Broker is a message broker that supports operations to manage task queues. // Broker is a message broker that supports operations to manage task queues.
// //
// See rdb.RDB as a reference implementation. // See rdb.RDB as a reference implementation.
type Broker interface { type Broker interface {
Ping() error Ping() error
Enqueue(msg *TaskMessage) error Enqueue(ctx context.Context, msg *TaskMessage) error
EnqueueUnique(msg *TaskMessage, ttl time.Duration) error EnqueueUnique(ctx context.Context, msg *TaskMessage, ttl time.Duration) error
Dequeue(qnames ...string) (*TaskMessage, time.Time, error) Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
Done(msg *TaskMessage) error Done(ctx context.Context, msg *TaskMessage) error
Requeue(msg *TaskMessage) error MarkAsComplete(ctx context.Context, msg *TaskMessage) error
Schedule(msg *TaskMessage, processAt time.Time) error Requeue(ctx context.Context, msg *TaskMessage) error
ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error Schedule(ctx context.Context, msg *TaskMessage, processAt time.Time) error
Retry(msg *TaskMessage, processAt time.Time, errMsg string) error ScheduleUnique(ctx context.Context, msg *TaskMessage, processAt time.Time, ttl time.Duration) error
Archive(msg *TaskMessage, errMsg string) error Retry(ctx context.Context, msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
CheckAndEnqueue(qnames ...string) error Archive(ctx context.Context, msg *TaskMessage, errMsg string) error
ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error) ForwardIfReady(qnames ...string) error
DeleteExpiredCompletedTasks(qname string) error
ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*TaskMessage, error)
ExtendLease(qname string, ids ...string) (time.Time, error)
WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
ClearServerState(host string, pid int, serverID string) error ClearServerState(host string, pid int, serverID string) error
CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
PublishCancelation(id string) error PublishCancelation(id string) error
WriteResult(qname, id string, data []byte) (n int, err error)
Close() error Close() error
} }
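Roughly how the heartbeat might combine Broker.ExtendLease with the Lease type above; this is a sketch under assumed names (extendLeases, the five-second interval, the leases map keyed by task ID), not the actual heartbeat implementation, and it presumes the usual time and base imports.

// extendLeases is an illustrative loop: it periodically asks the broker to
// extend the lease of every active task in the queue and propagates the new
// expiration time to each in-memory Lease.
func extendLeases(b base.Broker, qname string, leases map[string]*base.Lease, done <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			ids := make([]string, 0, len(leases))
			for id := range leases {
				ids = append(ids, id)
			}
			if len(ids) == 0 {
				continue
			}
			expireAt, err := b.ExtendLease(qname, ids...)
			if err != nil {
				continue // a real implementation would log the error
			}
			for _, l := range leases {
				l.Reset(expireAt)
			}
		}
	}
}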


@@ -6,26 +6,49 @@ package base
import ( import (
"context" "context"
"crypto/md5"
"encoding/hex"
"encoding/json" "encoding/json"
"fmt"
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq/internal/timeutil"
) )
func TestTaskKey(t *testing.T) {
id := uuid.NewString()
tests := []struct {
qname string
id string
want string
}{
{"default", id, fmt.Sprintf("asynq:{default}:t:%s", id)},
}
for _, tc := range tests {
got := TaskKey(tc.qname, tc.id)
if got != tc.want {
t.Errorf("TaskKey(%q, %s) = %q, want %q", tc.qname, tc.id, got, tc.want)
}
}
}
func TestQueueKey(t *testing.T) { func TestQueueKey(t *testing.T) {
tests := []struct { tests := []struct {
qname string qname string
want string want string
}{ }{
{"default", "asynq:{default}"}, {"default", "asynq:{default}:pending"},
{"custom", "asynq:{custom}"}, {"custom", "asynq:{custom}:pending"},
} }
for _, tc := range tests { for _, tc := range tests {
got := QueueKey(tc.qname) got := PendingKey(tc.qname)
if got != tc.want { if got != tc.want {
t.Errorf("QueueKey(%q) = %q, want %q", tc.qname, got, tc.want) t.Errorf("QueueKey(%q) = %q, want %q", tc.qname, got, tc.want)
} }
@@ -49,19 +72,19 @@ func TestActiveKey(t *testing.T) {
} }
} }
func TestDeadlinesKey(t *testing.T) { func TestLeaseKey(t *testing.T) {
tests := []struct { tests := []struct {
qname string qname string
want string want string
}{ }{
{"default", "asynq:{default}:deadlines"}, {"default", "asynq:{default}:lease"},
{"custom", "asynq:{custom}:deadlines"}, {"custom", "asynq:{custom}:lease"},
} }
for _, tc := range tests { for _, tc := range tests {
got := DeadlinesKey(tc.qname) got := LeaseKey(tc.qname)
if got != tc.want { if got != tc.want {
t.Errorf("DeadlinesKey(%q) = %q, want %q", tc.qname, got, tc.want) t.Errorf("LeaseKey(%q) = %q, want %q", tc.qname, got, tc.want)
} }
} }
} }
@@ -117,6 +140,23 @@ func TestArchivedKey(t *testing.T) {
} }
} }
func TestCompletedKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:completed"},
{"custom", "asynq:{custom}:completed"},
}
for _, tc := range tests {
got := CompletedKey(tc.qname)
if got != tc.want {
t.Errorf("CompletedKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestPausedKey(t *testing.T) { func TestPausedKey(t *testing.T) {
tests := []struct { tests := []struct {
qname string qname string
@@ -134,6 +174,40 @@ func TestPausedKey(t *testing.T) {
} }
} }
func TestProcessedTotalKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:processed"},
{"custom", "asynq:{custom}:processed"},
}
for _, tc := range tests {
got := ProcessedTotalKey(tc.qname)
if got != tc.want {
t.Errorf("ProcessedTotalKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestFailedTotalKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:failed"},
{"custom", "asynq:{custom}:failed"},
}
for _, tc := range tests {
got := FailedTotalKey(tc.qname)
if got != tc.want {
t.Errorf("FailedTotalKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestProcessedKey(t *testing.T) { func TestProcessedKey(t *testing.T) {
tests := []struct { tests := []struct {
qname string qname string
@@ -247,52 +321,69 @@ func TestSchedulerHistoryKey(t *testing.T) {
} }
} }
func toBytes(m map[string]interface{}) []byte {
b, err := json.Marshal(m)
if err != nil {
panic(err)
}
return b
}
func TestUniqueKey(t *testing.T) { func TestUniqueKey(t *testing.T) {
payload1 := toBytes(map[string]interface{}{"a": 123, "b": "hello", "c": true})
payload2 := toBytes(map[string]interface{}{"b": "hello", "c": true, "a": 123})
payload3 := toBytes(map[string]interface{}{
"address": map[string]string{"line": "123 Main St", "city": "Boston", "state": "MA"},
"names": []string{"bob", "mike", "rob"}})
payload4 := toBytes(map[string]interface{}{
"time": time.Date(2020, time.July, 28, 0, 0, 0, 0, time.UTC),
"duration": time.Hour})
checksum := func(data []byte) string {
sum := md5.Sum(data)
return hex.EncodeToString(sum[:])
}
tests := []struct { tests := []struct {
desc string desc string
qname string qname string
tasktype string tasktype string
payload map[string]interface{} payload []byte
want string want string
}{ }{
{ {
"with primitive types", "with primitive types",
"default", "default",
"email:send", "email:send",
map[string]interface{}{"a": 123, "b": "hello", "c": true}, payload1,
"asynq:{default}:unique:email:send:a=123,b=hello,c=true", fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload1)),
}, },
{ {
"with unsorted keys", "with unsorted keys",
"default", "default",
"email:send", "email:send",
map[string]interface{}{"b": "hello", "c": true, "a": 123}, payload2,
"asynq:{default}:unique:email:send:a=123,b=hello,c=true", fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload2)),
}, },
{ {
"with composite types", "with composite types",
"default", "default",
"email:send", "email:send",
map[string]interface{}{ payload3,
"address": map[string]string{"line": "123 Main St", "city": "Boston", "state": "MA"}, fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload3)),
"names": []string{"bob", "mike", "rob"}},
"asynq:{default}:unique:email:send:address=map[city:Boston line:123 Main St state:MA],names=[bob mike rob]",
}, },
{ {
"with complex types", "with complex types",
"default", "default",
"email:send", "email:send",
map[string]interface{}{ payload4,
"time": time.Date(2020, time.July, 28, 0, 0, 0, 0, time.UTC), fmt.Sprintf("asynq:{default}:unique:email:send:%s", checksum(payload4)),
"duration": time.Hour},
"asynq:{default}:unique:email:send:duration=1h0m0s,time=2020-07-28 00:00:00 +0000 UTC",
}, },
{ {
"with nil payload", "with nil payload",
"default", "default",
"reindex", "reindex",
nil, nil,
"asynq:{default}:unique:reindex:nil", "asynq:{default}:unique:reindex:",
}, },
} }
@@ -305,31 +396,33 @@ func TestUniqueKey(t *testing.T) {
} }
func TestMessageEncoding(t *testing.T) { func TestMessageEncoding(t *testing.T) {
id := uuid.New() id := uuid.NewString()
tests := []struct { tests := []struct {
in *TaskMessage in *TaskMessage
out *TaskMessage out *TaskMessage
}{ }{
{ {
in: &TaskMessage{ in: &TaskMessage{
Type: "task1", Type: "task1",
Payload: map[string]interface{}{"a": 1, "b": "hello!", "c": true}, Payload: toBytes(map[string]interface{}{"a": 1, "b": "hello!", "c": true}),
ID: id, ID: id,
Queue: "default", Queue: "default",
Retry: 10, Retry: 10,
Retried: 0, Retried: 0,
Timeout: 1800, Timeout: 1800,
Deadline: 1692311100, Deadline: 1692311100,
Retention: 3600,
}, },
out: &TaskMessage{ out: &TaskMessage{
Type: "task1", Type: "task1",
Payload: map[string]interface{}{"a": json.Number("1"), "b": "hello!", "c": true}, Payload: toBytes(map[string]interface{}{"a": json.Number("1"), "b": "hello!", "c": true}),
ID: id, ID: id,
Queue: "default", Queue: "default",
Retry: 10, Retry: 10,
Retried: 0, Retried: 0,
Timeout: 1800, Timeout: 1800,
Deadline: 1692311100, Deadline: 1692311100,
Retention: 3600,
}, },
}, },
} }
@@ -352,28 +445,143 @@ func TestMessageEncoding(t *testing.T) {
} }
} }
// Test for status being accessed by multiple goroutines. func TestServerInfoEncoding(t *testing.T) {
// Run with -race flag to check for data race. tests := []struct {
func TestStatusConcurrentAccess(t *testing.T) { info ServerInfo
status := NewServerStatus(StatusIdle) }{
{
info: ServerInfo{
Host: "127.0.0.1",
PID: 9876,
ServerID: "abc123",
Concurrency: 10,
Queues: map[string]int{"default": 1, "critical": 2},
StrictPriority: false,
Status: "active",
Started: time.Now().Add(-3 * time.Hour),
ActiveWorkerCount: 8,
},
},
}
var wg sync.WaitGroup for _, tc := range tests {
encoded, err := EncodeServerInfo(&tc.info)
if err != nil {
t.Errorf("EncodeServerInfo(info) returned error: %v", err)
continue
}
decoded, err := DecodeServerInfo(encoded)
if err != nil {
t.Errorf("DecodeServerInfo(encoded) returned error: %v", err)
continue
}
if diff := cmp.Diff(&tc.info, decoded); diff != "" {
t.Errorf("Decoded ServerInfo == %+v, want %+v;(-want,+got)\n%s",
decoded, tc.info, diff)
}
}
}
wg.Add(1) func TestWorkerInfoEncoding(t *testing.T) {
go func() { tests := []struct {
defer wg.Done() info WorkerInfo
status.Get() }{
_ = status.String() {
}() info: WorkerInfo{
Host: "127.0.0.1",
PID: 9876,
ServerID: "abc123",
ID: uuid.NewString(),
Type: "taskA",
Payload: toBytes(map[string]interface{}{"foo": "bar"}),
Queue: "default",
Started: time.Now().Add(-3 * time.Hour),
Deadline: time.Now().Add(30 * time.Second),
},
},
}
wg.Add(1) for _, tc := range tests {
go func() { encoded, err := EncodeWorkerInfo(&tc.info)
defer wg.Done() if err != nil {
status.Set(StatusStopped) t.Errorf("EncodeWorkerInfo(info) returned error: %v", err)
_ = status.String() continue
}() }
decoded, err := DecodeWorkerInfo(encoded)
if err != nil {
t.Errorf("DecodeWorkerInfo(encoded) returned error: %v", err)
continue
}
if diff := cmp.Diff(&tc.info, decoded); diff != "" {
t.Errorf("Decoded WorkerInfo == %+v, want %+v;(-want,+got)\n%s",
decoded, tc.info, diff)
}
}
}
wg.Wait() func TestSchedulerEntryEncoding(t *testing.T) {
tests := []struct {
entry SchedulerEntry
}{
{
entry: SchedulerEntry{
ID: uuid.NewString(),
Spec: "* * * * *",
Type: "task_A",
Payload: toBytes(map[string]interface{}{"foo": "bar"}),
Opts: []string{"Queue('email')"},
Next: time.Now().Add(30 * time.Second).UTC(),
Prev: time.Now().Add(-2 * time.Minute).UTC(),
},
},
}
for _, tc := range tests {
encoded, err := EncodeSchedulerEntry(&tc.entry)
if err != nil {
t.Errorf("EncodeSchedulerEntry(entry) returned error: %v", err)
continue
}
decoded, err := DecodeSchedulerEntry(encoded)
if err != nil {
t.Errorf("DecodeSchedulerEntry(encoded) returned error: %v", err)
continue
}
if diff := cmp.Diff(&tc.entry, decoded); diff != "" {
t.Errorf("Decoded SchedulerEntry == %+v, want %+v;(-want,+got)\n%s",
decoded, tc.entry, diff)
}
}
}
func TestSchedulerEnqueueEventEncoding(t *testing.T) {
tests := []struct {
event SchedulerEnqueueEvent
}{
{
event: SchedulerEnqueueEvent{
TaskID: uuid.NewString(),
EnqueuedAt: time.Now().Add(-30 * time.Second).UTC(),
},
},
}
for _, tc := range tests {
encoded, err := EncodeSchedulerEnqueueEvent(&tc.event)
if err != nil {
t.Errorf("EncodeSchedulerEnqueueEvent(event) returned error: %v", err)
continue
}
decoded, err := DecodeSchedulerEnqueueEvent(encoded)
if err != nil {
t.Errorf("DecodeSchedulerEnqueueEvent(encoded) returned error: %v", err)
continue
}
if diff := cmp.Diff(&tc.event, decoded); diff != "" {
t.Errorf("Decoded SchedulerEnqueueEvent == %+v, want %+v;(-want,+got)\n%s",
decoded, tc.event, diff)
}
}
} }
// Test for cancelations being accessed by multiple goroutines. // Test for cancelations being accessed by multiple goroutines.
@@ -420,3 +628,75 @@ func TestCancelationsConcurrentAccess(t *testing.T) {
t.Errorf("(*Cancelations).Get(%q) = _, true, want <nil>, false", key2) t.Errorf("(*Cancelations).Get(%q) = _, true, want <nil>, false", key2)
} }
} }
func TestLeaseReset(t *testing.T) {
now := time.Now()
clock := timeutil.NewSimulatedClock(now)
l := NewLease(now.Add(30 * time.Second))
l.Clock = clock
// Check initial state
if !l.IsValid() {
t.Errorf("lease should be valid when expiration is set to a future time")
}
if want := now.Add(30 * time.Second); l.Deadline() != want {
t.Errorf("Lease.Deadline() = %v, want %v", l.Deadline(), want)
}
// Test Reset
if !l.Reset(now.Add(45 * time.Second)) {
t.Fatalf("Lease.Reset returned false when extending")
}
if want := now.Add(45 * time.Second); l.Deadline() != want {
t.Errorf("After Reset: Lease.Deadline() = %v, want %v", l.Deadline(), want)
}
clock.AdvanceTime(1 * time.Minute) // simulate lease expiration
if l.IsValid() {
t.Errorf("lease should be invalid after expiration")
}
// Reset should return false if lease is expired.
if l.Reset(time.Now().Add(20 * time.Second)) {
t.Errorf("Lease.Reset should return false after expiration")
}
}
func TestLeaseNotifyExpiration(t *testing.T) {
now := time.Now()
clock := timeutil.NewSimulatedClock(now)
l := NewLease(now.Add(30 * time.Second))
l.Clock = clock
select {
case <-l.Done():
t.Fatalf("Lease.Done() did not block")
default:
}
if l.NotifyExpiration() {
t.Fatalf("Lease.NotifyExpiration() should return false when lease is still valid")
}
clock.AdvanceTime(1 * time.Minute) // simulate lease expiration
if l.IsValid() {
t.Errorf("Lease should be invalid after expiration")
}
if !l.NotifyExpiration() {
t.Errorf("Lease.NotifyExpiration() should return true after expiration")
}
if !l.NotifyExpiration() {
t.Errorf("It should be legal to call Lease.NotifyExpiration multiple times")
}
select {
case <-l.Done():
// expected
default:
t.Errorf("Lease.Done() blocked after call to Lease.NotifyExpiration()")
}
}


@@ -0,0 +1,87 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package context
import (
"context"
"time"
"github.com/hibiken/asynq/internal/base"
)
// A taskMetadata holds task scoped data to put in context.
type taskMetadata struct {
id string
maxRetry int
retryCount int
qname string
}
// ctxKey type is unexported to prevent collisions with context keys defined in
// other packages.
type ctxKey int
// metadataCtxKey is the context key for the task metadata.
// Its value of zero is arbitrary.
const metadataCtxKey ctxKey = 0
// New returns a context and cancel function for a given task message.
func New(base context.Context, msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
metadata := taskMetadata{
id: msg.ID,
maxRetry: msg.Retry,
retryCount: msg.Retried,
qname: msg.Queue,
}
ctx := context.WithValue(base, metadataCtxKey, metadata)
return context.WithDeadline(ctx, deadline)
}
// GetTaskID extracts a task ID from a context, if any.
//
// ID of a task is guaranteed to be unique.
// ID of a task doesn't change if the task is being retried.
func GetTaskID(ctx context.Context) (id string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return "", false
}
return metadata.id, true
}
// GetRetryCount extracts retry count from a context, if any.
//
// Return value n indicates the number of times associated task has been
// retried so far.
func GetRetryCount(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return 0, false
}
return metadata.retryCount, true
}
// GetMaxRetry extracts maximum retry from a context, if any.
//
// Return value n indicates the maximum number of times the associated task
// can be retried if ProcessTask returns a non-nil error.
func GetMaxRetry(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return 0, false
}
return metadata.maxRetry, true
}
// GetQueueName extracts queue name from a context, if any.
//
// Return value qname indicates which queue the task was pulled from.
func GetQueueName(ctx context.Context) (qname string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return "", false
}
return metadata.qname, true
}
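A sketch of how the metadata attached by New can be read back, for example inside a task handler; compiled inside the module and using the internal package directly for illustration.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hibiken/asynq/internal/base"
	asynqcontext "github.com/hibiken/asynq/internal/context"
)

func main() {
	msg := &base.TaskMessage{
		ID:    "1234",
		Type:  "email:send",
		Queue: "default",
		Retry: 25,
	}
	ctx, cancel := asynqcontext.New(context.Background(), msg, time.Now().Add(30*time.Minute))
	defer cancel()

	if id, ok := asynqcontext.GetTaskID(ctx); ok {
		fmt.Println("task id:", id) // task id: 1234
	}
	if qname, ok := asynqcontext.GetQueueName(ctx); ok {
		fmt.Println("queue:", qname) // queue: default
	}
}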


@@ -2,10 +2,11 @@
// Use of this source code is governed by a MIT license // Use of this source code is governed by a MIT license
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
package asynq package context
import ( import (
"context" "context"
"fmt"
"testing" "testing"
"time" "time"
@@ -24,12 +25,11 @@ func TestCreateContextWithFutureDeadline(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
msg := &base.TaskMessage{ msg := &base.TaskMessage{
Type: "something", Type: "something",
ID: uuid.New(), ID: uuid.NewString(),
Payload: nil, Payload: nil,
} }
ctx, cancel := createContext(msg, tc.deadline) ctx, cancel := New(context.Background(), msg, tc.deadline)
select { select {
case x := <-ctx.Done(): case x := <-ctx.Done():
t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x) t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
@@ -54,6 +54,53 @@ func TestCreateContextWithFutureDeadline(t *testing.T) {
} }
} }
func TestCreateContextWithBaseContext(t *testing.T) {
type ctxKey string
type ctxValue string
var key ctxKey = "key"
var value ctxValue = "value"
tests := []struct {
baseCtx context.Context
validate func(ctx context.Context, t *testing.T) error
}{
{
baseCtx: context.WithValue(context.Background(), key, value),
validate: func(ctx context.Context, t *testing.T) error {
got, ok := ctx.Value(key).(ctxValue)
if !ok {
return fmt.Errorf("ctx.Value().(ctxValue) returned false, expected to be true")
}
if want := value; got != want {
return fmt.Errorf("ctx.Value().(ctxValue) returned unknown value (%v), expected to be %s", got, value)
}
return nil
},
},
}
for _, tc := range tests {
msg := &base.TaskMessage{
Type: "something",
ID: uuid.NewString(),
Payload: nil,
}
ctx, cancel := New(tc.baseCtx, msg, time.Now().Add(30*time.Minute))
defer cancel()
select {
case x := <-ctx.Done():
t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
default:
}
if err := tc.validate(ctx, t); err != nil {
t.Errorf("%v", err)
}
}
}
func TestCreateContextWithPastDeadline(t *testing.T) { func TestCreateContextWithPastDeadline(t *testing.T) {
tests := []struct { tests := []struct {
deadline time.Time deadline time.Time
@@ -64,11 +111,11 @@ func TestCreateContextWithPastDeadline(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
msg := &base.TaskMessage{ msg := &base.TaskMessage{
Type: "something", Type: "something",
ID: uuid.New(), ID: uuid.NewString(),
Payload: nil, Payload: nil,
} }
ctx, cancel := createContext(msg, tc.deadline) ctx, cancel := New(context.Background(), msg, tc.deadline)
defer cancel() defer cancel()
select { select {
@@ -92,21 +139,21 @@ func TestGetTaskMetadataFromContext(t *testing.T) {
desc string desc string
msg *base.TaskMessage msg *base.TaskMessage
}{ }{
{"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}}, {"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}},
{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}}, {"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}},
{"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}}, {"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}},
} }
for _, tc := range tests { for _, tc := range tests {
ctx, cancel := createContext(tc.msg, time.Now().Add(30*time.Minute)) ctx, cancel := New(context.Background(), tc.msg, time.Now().Add(30*time.Minute))
defer cancel() defer cancel()
id, ok := GetTaskID(ctx) id, ok := GetTaskID(ctx)
if !ok { if !ok {
t.Errorf("%s: GetTaskID(ctx) returned ok == false", tc.desc) t.Errorf("%s: GetTaskID(ctx) returned ok == false", tc.desc)
} }
if ok && id != tc.msg.ID.String() { if ok && id != tc.msg.ID {
t.Errorf("%s: GetTaskID(ctx) returned id == %q, want %q", tc.desc, id, tc.msg.ID.String()) t.Errorf("%s: GetTaskID(ctx) returned id == %q, want %q", tc.desc, id, tc.msg.ID)
} }
retried, ok := GetRetryCount(ctx) retried, ok := GetRetryCount(ctx)

internal/errors/errors.go

@@ -0,0 +1,288 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Package errors defines the error type and functions used by
// asynq and its internal packages.
package errors
// Note: This package is inspired by a blog post about error handling in project Upspin
// https://commandcenter.blogspot.com/2017/12/error-handling-in-upspin.html.
import (
"errors"
"fmt"
"log"
"runtime"
"strings"
)
// Error is the type that implements the error interface.
// It contains a number of fields, each of different type.
// An Error value may leave some values unset.
type Error struct {
Code Code
Op Op
Err error
}
func (e *Error) DebugString() string {
var b strings.Builder
if e.Op != "" {
b.WriteString(string(e.Op))
}
if e.Code != Unspecified {
if b.Len() > 0 {
b.WriteString(": ")
}
b.WriteString(e.Code.String())
}
if e.Err != nil {
if b.Len() > 0 {
b.WriteString(": ")
}
b.WriteString(e.Err.Error())
}
return b.String()
}
func (e *Error) Error() string {
var b strings.Builder
if e.Code != Unspecified {
b.WriteString(e.Code.String())
}
if e.Err != nil {
if b.Len() > 0 {
b.WriteString(": ")
}
b.WriteString(e.Err.Error())
}
return b.String()
}
func (e *Error) Unwrap() error {
return e.Err
}
// Code defines the canonical error code.
type Code uint8
// List of canonical error codes.
const (
Unspecified Code = iota
NotFound
FailedPrecondition
Internal
AlreadyExists
Unknown
// Note: If you add a new value here, make sure to update String method.
)
func (c Code) String() string {
switch c {
case Unspecified:
return "ERROR_CODE_UNSPECIFIED"
case NotFound:
return "NOT_FOUND"
case FailedPrecondition:
return "FAILED_PRECONDITION"
case Internal:
return "INTERNAL_ERROR"
case AlreadyExists:
return "ALREADY_EXISTS"
case Unknown:
return "UNKNOWN"
}
panic(fmt.Sprintf("unknown error code %d", c))
}
// Op describes an operation, usually as the package and method,
// such as "rdb.Enqueue".
type Op string
// E builds an error value from its arguments.
// There must be at least one argument or E panics.
// The type of each argument determines its meaning.
// If more than one argument of a given type is presented,
// only the last one is recorded.
//
// The types are:
// errors.Op
// The operation being performed, usually the method
// being invoked (Get, Put, etc.).
// errors.Code
// The canonical error code, such as NOT_FOUND.
// string
// Treated as an error message and assigned to the
// Err field after a call to errors.New.
// error
// The underlying error that triggered this one.
//
// If the error is printed, only those items that have been
// set to non-zero values will appear in the result.
func E(args ...interface{}) error {
if len(args) == 0 {
panic("call to errors.E with no arguments")
}
e := &Error{}
for _, arg := range args {
switch arg := arg.(type) {
case Op:
e.Op = arg
case Code:
e.Code = arg
case error:
e.Err = arg
case string:
e.Err = errors.New(arg)
default:
_, file, line, _ := runtime.Caller(1)
log.Printf("errors.E: bad call from %s:%d: %v", file, line, args)
return fmt.Errorf("unknown type %T, value %v in error call", arg, arg)
}
}
return e
}
// CanonicalCode returns the canonical code of the given error if one is present.
// Otherwise it returns Unspecified.
func CanonicalCode(err error) Code {
if err == nil {
return Unspecified
}
e, ok := err.(*Error)
if !ok {
return Unspecified
}
if e.Code == Unspecified {
return CanonicalCode(e.Err)
}
return e.Code
}
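A sketch of how the constructor and the inspection helpers fit together (the domain-specific error types it uses are defined just below); the operation name and IDs are made up for illustration.

package main

import (
	"fmt"

	"github.com/hibiken/asynq/internal/errors"
)

func main() {
	err := errors.E(errors.Op("rdb.ArchiveTask"), errors.NotFound,
		&errors.TaskNotFoundError{Queue: "default", ID: "1234"})

	fmt.Println(err)                        // NOT_FOUND: cannot find task with id=1234 in queue "default"
	fmt.Println(errors.CanonicalCode(err))  // NOT_FOUND
	fmt.Println(errors.IsTaskNotFound(err)) // true
}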
/******************************************
    Domain Specific Error Types & Values
*******************************************/
var (
// ErrNoProcessableTask indicates that there are no tasks ready to be processed.
ErrNoProcessableTask = errors.New("no tasks are ready for processing")
// ErrDuplicateTask indicates that another task with the same unique key holds the uniqueness lock.
ErrDuplicateTask = errors.New("task already exists")
// ErrTaskIdConflict indicates that another task with the same task ID already exists
ErrTaskIdConflict = errors.New("task id conflicts with another task")
)
// TaskNotFoundError indicates that a task with the given ID does not exist
// in the given queue.
type TaskNotFoundError struct {
Queue string // queue name
ID string // task id
}
func (e *TaskNotFoundError) Error() string {
return fmt.Sprintf("cannot find task with id=%s in queue %q", e.ID, e.Queue)
}
// IsTaskNotFound reports whether any error in err's chain is of type TaskNotFoundError.
func IsTaskNotFound(err error) bool {
var target *TaskNotFoundError
return As(err, &target)
}
// QueueNotFoundError indicates that a queue with the given name does not exist.
type QueueNotFoundError struct {
Queue string // queue name
}
func (e *QueueNotFoundError) Error() string {
return fmt.Sprintf("queue %q does not exist", e.Queue)
}
// IsQueueNotFound reports whether any error in err's chain is of type QueueNotFoundError.
func IsQueueNotFound(err error) bool {
var target *QueueNotFoundError
return As(err, &target)
}
// QueueNotEmptyError indicates that the given queue is not empty.
type QueueNotEmptyError struct {
Queue string // queue name
}
func (e *QueueNotEmptyError) Error() string {
return fmt.Sprintf("queue %q is not empty", e.Queue)
}
// IsQueueNotEmpty reports whether any error in err's chain is of type QueueNotEmptyError.
func IsQueueNotEmpty(err error) bool {
var target *QueueNotEmptyError
return As(err, &target)
}
// TaskAlreadyArchivedError indicates that the task in question is already archived.
type TaskAlreadyArchivedError struct {
Queue string // queue name
ID string // task id
}
func (e *TaskAlreadyArchivedError) Error() string {
return fmt.Sprintf("task is already archived: id=%s, queue=%s", e.ID, e.Queue)
}
// IsTaskAlreadyArchived reports whether any error in err's chain is of type TaskAlreadyArchivedError.
func IsTaskAlreadyArchived(err error) bool {
var target *TaskAlreadyArchivedError
return As(err, &target)
}
// RedisCommandError indicates that the given redis command returned error.
type RedisCommandError struct {
Command string // redis command (e.g. LRANGE, ZADD, etc)
Err error // underlying error
}
func (e *RedisCommandError) Error() string {
return fmt.Sprintf("redis command error: %s failed: %v", strings.ToUpper(e.Command), e.Err)
}
func (e *RedisCommandError) Unwrap() error { return e.Err }
// IsRedisCommandError reports whether any error in err's chain is of type RedisCommandError.
func IsRedisCommandError(err error) bool {
var target *RedisCommandError
return As(err, &target)
}
/*************************************************
Standard Library errors package functions
*************************************************/
// New returns an error that formats as the given text.
// Each call to New returns a distinct error value even if the text is identical.
//
// This function is the errors.New function from the standard library (https://golang.org/pkg/errors/#New).
// It is exported from this package for import convenience.
func New(text string) error { return errors.New(text) }
// Is reports whether any error in err's chain matches target.
//
// This function is the errors.Is function from the standard library (https://golang.org/pkg/errors/#Is).
// It is exported from this package for import convenience.
func Is(err, target error) bool { return errors.Is(err, target) }
// As finds the first error in err's chain that matches target, and if so, sets target to that error value and returns true.
// Otherwise, it returns false.
//
// This function is the errors.As function from the standard library (https://golang.org/pkg/errors/#As).
// It is exported from this package for import convenience.
func As(err error, target interface{}) bool { return errors.As(err, target) }
// Unwrap returns the result of calling the Unwrap method on err, if err's type contains an Unwrap method returning error.
// Otherwise, Unwrap returns nil.
//
// This function is the errors.Unwrap function from the standard library (https://golang.org/pkg/errors/#Unwrap).
// It is exported from this package for import convenience.
func Unwrap(err error) error { return errors.Unwrap(err) }


@@ -0,0 +1,176 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package errors
import "testing"
func TestErrorDebugString(t *testing.T) {
// DebugString should include Op since it's meant to be used by
// maintainers/contributors of the asynq package.
tests := []struct {
desc string
err error
want string
}{
{
desc: "With Op, Code, and string",
err: E(Op("rdb.DeleteTask"), NotFound, "cannot find task with id=123"),
want: "rdb.DeleteTask: NOT_FOUND: cannot find task with id=123",
},
{
desc: "With Op, Code and error",
err: E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}),
want: `rdb.DeleteTask: NOT_FOUND: cannot find task with id=123 in queue "default"`,
},
}
for _, tc := range tests {
if got := tc.err.(*Error).DebugString(); got != tc.want {
t.Errorf("%s: got=%q, want=%q", tc.desc, got, tc.want)
}
}
}
func TestErrorString(t *testing.T) {
// String method should omit Op since op is an internal detail
// and we don't want to provide it to users of the package.
tests := []struct {
desc string
err error
want string
}{
{
desc: "With Op, Code, and string",
err: E(Op("rdb.DeleteTask"), NotFound, "cannot find task with id=123"),
want: "NOT_FOUND: cannot find task with id=123",
},
{
desc: "With Op, Code and error",
err: E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}),
want: `NOT_FOUND: cannot find task with id=123 in queue "default"`,
},
}
for _, tc := range tests {
if got := tc.err.Error(); got != tc.want {
t.Errorf("%s: got=%q, want=%q", tc.desc, got, tc.want)
}
}
}
func TestErrorIs(t *testing.T) {
var ErrCustom = New("custom sentinel error")
tests := []struct {
desc string
err error
target error
want bool
}{
{
desc: "should unwrap one level",
err: E(Op("rdb.DeleteTask"), ErrCustom),
target: ErrCustom,
want: true,
},
}
for _, tc := range tests {
if got := Is(tc.err, tc.target); got != tc.want {
t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want)
}
}
}
func TestErrorAs(t *testing.T) {
tests := []struct {
desc string
err error
target interface{}
want bool
}{
{
desc: "should unwrap one level",
err: E(Op("rdb.DeleteTask"), NotFound, &QueueNotFoundError{Queue: "email"}),
target: &QueueNotFoundError{},
want: true,
},
}
for _, tc := range tests {
if got := As(tc.err, &tc.target); got != tc.want {
t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want)
}
}
}
func TestErrorPredicates(t *testing.T) {
tests := []struct {
desc string
fn func(err error) bool
err error
want bool
}{
{
desc: "IsTaskNotFound should detect presence of TaskNotFoundError in err's chain",
fn: IsTaskNotFound,
err: E(Op("rdb.ArchiveTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "9876"}),
want: true,
},
{
desc: "IsTaskNotFound should detect absence of TaskNotFoundError in err's chain",
fn: IsTaskNotFound,
err: E(Op("rdb.ArchiveTask"), NotFound, &QueueNotFoundError{Queue: "default"}),
want: false,
},
{
desc: "IsQueueNotFound should detect presence of QueueNotFoundError in err's chain",
fn: IsQueueNotFound,
err: E(Op("rdb.ArchiveTask"), NotFound, &QueueNotFoundError{Queue: "default"}),
want: true,
},
}
for _, tc := range tests {
if got := tc.fn(tc.err); got != tc.want {
t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want)
}
}
}
func TestCanonicalCode(t *testing.T) {
tests := []struct {
desc string
err error
want Code
}{
{
desc: "without nesting",
err: E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}),
want: NotFound,
},
{
desc: "with nesting",
err: E(FailedPrecondition, E(NotFound)),
want: FailedPrecondition,
},
{
desc: "returns Unspecified if err is not *Error",
err: New("some other error"),
want: Unspecified,
},
{
desc: "returns Unspecified if err is nil",
err: nil,
want: Unspecified,
},
}
for _, tc := range tests {
if got := CanonicalCode(tc.err); got != tc.want {
t.Errorf("%s: got=%s, want=%s", tc.desc, got, tc.want)
}
}
}

internal/proto/asynq.pb.go

@@ -0,0 +1,838 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.17.3
// source: asynq.proto
package proto
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// TaskMessage is the internal representation of a task with additional
// metadata fields.
type TaskMessage struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Type indicates the kind of the task to be performed.
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Payload holds data needed to process the task.
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
// Unique identifier for the task.
Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"`
// Name of the queue to which this task belongs.
Queue string `protobuf:"bytes,4,opt,name=queue,proto3" json:"queue,omitempty"`
// Max number of retries for this task.
Retry int32 `protobuf:"varint,5,opt,name=retry,proto3" json:"retry,omitempty"`
// Number of times this task has been retried so far.
Retried int32 `protobuf:"varint,6,opt,name=retried,proto3" json:"retried,omitempty"`
// Error message from the last failure.
ErrorMsg string `protobuf:"bytes,7,opt,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"`
// Time of last failure in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// Use zero to indicate no last failure.
LastFailedAt int64 `protobuf:"varint,11,opt,name=last_failed_at,json=lastFailedAt,proto3" json:"last_failed_at,omitempty"`
// Timeout specifies timeout in seconds.
// Use zero to indicate no timeout.
Timeout int64 `protobuf:"varint,8,opt,name=timeout,proto3" json:"timeout,omitempty"`
// Deadline specifies the deadline for the task in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// Use zero to indicate no deadline.
Deadline int64 `protobuf:"varint,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
// UniqueKey holds the redis key used for uniqueness lock for this task.
// Empty string indicates that no uniqueness lock was used.
UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"`
// Retention period specified in a number of seconds.
// The task will be stored in redis as a completed task until the TTL
// expires.
Retention int64 `protobuf:"varint,12,opt,name=retention,proto3" json:"retention,omitempty"`
// Time when the task completed in success in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// This field is populated if result_ttl > 0 upon completion.
CompletedAt int64 `protobuf:"varint,13,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"`
}
func (x *TaskMessage) Reset() {
*x = TaskMessage{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TaskMessage) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TaskMessage) ProtoMessage() {}
func (x *TaskMessage) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TaskMessage.ProtoReflect.Descriptor instead.
func (*TaskMessage) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{0}
}
func (x *TaskMessage) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *TaskMessage) GetPayload() []byte {
if x != nil {
return x.Payload
}
return nil
}
func (x *TaskMessage) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *TaskMessage) GetQueue() string {
if x != nil {
return x.Queue
}
return ""
}
func (x *TaskMessage) GetRetry() int32 {
if x != nil {
return x.Retry
}
return 0
}
func (x *TaskMessage) GetRetried() int32 {
if x != nil {
return x.Retried
}
return 0
}
func (x *TaskMessage) GetErrorMsg() string {
if x != nil {
return x.ErrorMsg
}
return ""
}
func (x *TaskMessage) GetLastFailedAt() int64 {
if x != nil {
return x.LastFailedAt
}
return 0
}
func (x *TaskMessage) GetTimeout() int64 {
if x != nil {
return x.Timeout
}
return 0
}
func (x *TaskMessage) GetDeadline() int64 {
if x != nil {
return x.Deadline
}
return 0
}
func (x *TaskMessage) GetUniqueKey() string {
if x != nil {
return x.UniqueKey
}
return ""
}
func (x *TaskMessage) GetRetention() int64 {
if x != nil {
return x.Retention
}
return 0
}
func (x *TaskMessage) GetCompletedAt() int64 {
if x != nil {
return x.CompletedAt
}
return 0
}
// ServerInfo holds information about a running server.
type ServerInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Host machine the server is running on.
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
// PID of the server process.
Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
// Unique identifier for this server.
ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
// Maximum number of concurrent workers this server will use.
Concurrency int32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
// List of queue names with their priorities.
// The server will consume tasks from the queues and prioritize
// queues with higher priority numbers.
Queues map[string]int32 `protobuf:"bytes,5,rep,name=queues,proto3" json:"queues,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
// If set, the server will always consume tasks from a queue with higher
// priority.
StrictPriority bool `protobuf:"varint,6,opt,name=strict_priority,json=strictPriority,proto3" json:"strict_priority,omitempty"`
// Status indicates the status of the server.
Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
// Time this server was started.
StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Number of workers currently processing tasks.
ActiveWorkerCount int32 `protobuf:"varint,9,opt,name=active_worker_count,json=activeWorkerCount,proto3" json:"active_worker_count,omitempty"`
}
func (x *ServerInfo) Reset() {
*x = ServerInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ServerInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ServerInfo) ProtoMessage() {}
func (x *ServerInfo) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ServerInfo.ProtoReflect.Descriptor instead.
func (*ServerInfo) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{1}
}
func (x *ServerInfo) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
func (x *ServerInfo) GetPid() int32 {
if x != nil {
return x.Pid
}
return 0
}
func (x *ServerInfo) GetServerId() string {
if x != nil {
return x.ServerId
}
return ""
}
func (x *ServerInfo) GetConcurrency() int32 {
if x != nil {
return x.Concurrency
}
return 0
}
func (x *ServerInfo) GetQueues() map[string]int32 {
if x != nil {
return x.Queues
}
return nil
}
func (x *ServerInfo) GetStrictPriority() bool {
if x != nil {
return x.StrictPriority
}
return false
}
func (x *ServerInfo) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
func (x *ServerInfo) GetStartTime() *timestamppb.Timestamp {
if x != nil {
return x.StartTime
}
return nil
}
func (x *ServerInfo) GetActiveWorkerCount() int32 {
if x != nil {
return x.ActiveWorkerCount
}
return 0
}
// WorkerInfo holds information about a running worker.
type WorkerInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Host machine this worker is running on.
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
// PID of the process in which this worker is running.
Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
// ID of the server in which this worker is running.
ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
// ID of the task this worker is processing.
TaskId string `protobuf:"bytes,4,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
// Type of the task this worker is processing.
TaskType string `protobuf:"bytes,5,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
// Payload of the task this worker is processing.
TaskPayload []byte `protobuf:"bytes,6,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
// Name of the queue to which the task the worker is processing belongs.
Queue string `protobuf:"bytes,7,opt,name=queue,proto3" json:"queue,omitempty"`
// Time this worker started processing the task.
StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Deadline by which the worker needs to complete processing
// the task. If worker exceeds the deadline, the task will fail.
Deadline *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
}
func (x *WorkerInfo) Reset() {
*x = WorkerInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *WorkerInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WorkerInfo) ProtoMessage() {}
func (x *WorkerInfo) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WorkerInfo.ProtoReflect.Descriptor instead.
func (*WorkerInfo) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{2}
}
func (x *WorkerInfo) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
func (x *WorkerInfo) GetPid() int32 {
if x != nil {
return x.Pid
}
return 0
}
func (x *WorkerInfo) GetServerId() string {
if x != nil {
return x.ServerId
}
return ""
}
func (x *WorkerInfo) GetTaskId() string {
if x != nil {
return x.TaskId
}
return ""
}
func (x *WorkerInfo) GetTaskType() string {
if x != nil {
return x.TaskType
}
return ""
}
func (x *WorkerInfo) GetTaskPayload() []byte {
if x != nil {
return x.TaskPayload
}
return nil
}
func (x *WorkerInfo) GetQueue() string {
if x != nil {
return x.Queue
}
return ""
}
func (x *WorkerInfo) GetStartTime() *timestamppb.Timestamp {
if x != nil {
return x.StartTime
}
return nil
}
func (x *WorkerInfo) GetDeadline() *timestamppb.Timestamp {
if x != nil {
return x.Deadline
}
return nil
}
// SchedulerEntry holds information about a periodic task registered
// with a scheduler.
type SchedulerEntry struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Identifier of the scheduler entry.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// Periodic schedule spec of the entry.
Spec string `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"`
// Task type of the periodic task.
TaskType string `protobuf:"bytes,3,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
// Task payload of the periodic task.
TaskPayload []byte `protobuf:"bytes,4,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
// Options used to enqueue the periodic task.
EnqueueOptions []string `protobuf:"bytes,5,rep,name=enqueue_options,json=enqueueOptions,proto3" json:"enqueue_options,omitempty"`
// Next time the task will be enqueued.
NextEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=next_enqueue_time,json=nextEnqueueTime,proto3" json:"next_enqueue_time,omitempty"`
// Last time the task was enqueued.
// Zero time if task was never enqueued.
PrevEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=prev_enqueue_time,json=prevEnqueueTime,proto3" json:"prev_enqueue_time,omitempty"`
}
func (x *SchedulerEntry) Reset() {
*x = SchedulerEntry{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SchedulerEntry) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SchedulerEntry) ProtoMessage() {}
func (x *SchedulerEntry) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SchedulerEntry.ProtoReflect.Descriptor instead.
func (*SchedulerEntry) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{3}
}
func (x *SchedulerEntry) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *SchedulerEntry) GetSpec() string {
if x != nil {
return x.Spec
}
return ""
}
func (x *SchedulerEntry) GetTaskType() string {
if x != nil {
return x.TaskType
}
return ""
}
func (x *SchedulerEntry) GetTaskPayload() []byte {
if x != nil {
return x.TaskPayload
}
return nil
}
func (x *SchedulerEntry) GetEnqueueOptions() []string {
if x != nil {
return x.EnqueueOptions
}
return nil
}
func (x *SchedulerEntry) GetNextEnqueueTime() *timestamppb.Timestamp {
if x != nil {
return x.NextEnqueueTime
}
return nil
}
func (x *SchedulerEntry) GetPrevEnqueueTime() *timestamppb.Timestamp {
if x != nil {
return x.PrevEnqueueTime
}
return nil
}
// SchedulerEnqueueEvent holds information about an enqueue event
// by a scheduler.
type SchedulerEnqueueEvent struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// ID of the task that was enqueued.
TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
// Time the task was enqueued.
EnqueueTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=enqueue_time,json=enqueueTime,proto3" json:"enqueue_time,omitempty"`
}
func (x *SchedulerEnqueueEvent) Reset() {
*x = SchedulerEnqueueEvent{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SchedulerEnqueueEvent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SchedulerEnqueueEvent) ProtoMessage() {}
func (x *SchedulerEnqueueEvent) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SchedulerEnqueueEvent.ProtoReflect.Descriptor instead.
func (*SchedulerEnqueueEvent) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{4}
}
func (x *SchedulerEnqueueEvent) GetTaskId() string {
if x != nil {
return x.TaskId
}
return ""
}
func (x *SchedulerEnqueueEvent) GetEnqueueTime() *timestamppb.Timestamp {
if x != nil {
return x.EnqueueTime
}
return nil
}
var File_asynq_proto protoreflect.FileDescriptor
var file_asynq_proto_rawDesc = []byte{
0x0a, 0x0b, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x61,
0x73, 0x79, 0x6e, 0x71, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xea, 0x02, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79,
0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x74,
0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x72, 0x65, 0x74, 0x72, 0x79, 0x12,
0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05,
0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72,
0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66,
0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c,
0x6c, 0x61, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07,
0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74,
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69,
0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69,
0x6e, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79,
0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65,
0x79, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0c,
0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18,
0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64,
0x41, 0x74, 0x22, 0x8f, 0x03, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66,
0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65,
0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76,
0x65, 0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x35, 0x0a, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73,
0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x53,
0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x12, 0x27, 0x0a,
0x0f, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79,
0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x50, 0x72,
0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39,
0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74,
0x69, 0x76, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x57, 0x6f,
0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x51, 0x75, 0x65,
0x75, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49,
0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02,
0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72,
0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65,
0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69,
0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12,
0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c,
0x74, 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12,
0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74,
0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65,
0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08,
0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0e, 0x53, 0x63, 0x68,
0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73,
0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12,
0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c,
0x74, 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12,
0x27, 0x0a, 0x0f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75,
0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x11, 0x6e, 0x65, 0x78, 0x74,
0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
0x0f, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65,
0x12, 0x46, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65,
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x76, 0x45, 0x6e, 0x71,
0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x6f, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65,
0x64, 0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x6e,
0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x65, 0x6e,
0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x69, 0x62, 0x69, 0x6b, 0x65, 0x6e, 0x2f,
0x61, 0x73, 0x79, 0x6e, 0x71, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_asynq_proto_rawDescOnce sync.Once
file_asynq_proto_rawDescData = file_asynq_proto_rawDesc
)
func file_asynq_proto_rawDescGZIP() []byte {
file_asynq_proto_rawDescOnce.Do(func() {
file_asynq_proto_rawDescData = protoimpl.X.CompressGZIP(file_asynq_proto_rawDescData)
})
return file_asynq_proto_rawDescData
}
var file_asynq_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_asynq_proto_goTypes = []interface{}{
(*TaskMessage)(nil), // 0: asynq.TaskMessage
(*ServerInfo)(nil), // 1: asynq.ServerInfo
(*WorkerInfo)(nil), // 2: asynq.WorkerInfo
(*SchedulerEntry)(nil), // 3: asynq.SchedulerEntry
(*SchedulerEnqueueEvent)(nil), // 4: asynq.SchedulerEnqueueEvent
nil, // 5: asynq.ServerInfo.QueuesEntry
(*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
}
var file_asynq_proto_depIdxs = []int32{
5, // 0: asynq.ServerInfo.queues:type_name -> asynq.ServerInfo.QueuesEntry
6, // 1: asynq.ServerInfo.start_time:type_name -> google.protobuf.Timestamp
6, // 2: asynq.WorkerInfo.start_time:type_name -> google.protobuf.Timestamp
6, // 3: asynq.WorkerInfo.deadline:type_name -> google.protobuf.Timestamp
6, // 4: asynq.SchedulerEntry.next_enqueue_time:type_name -> google.protobuf.Timestamp
6, // 5: asynq.SchedulerEntry.prev_enqueue_time:type_name -> google.protobuf.Timestamp
6, // 6: asynq.SchedulerEnqueueEvent.enqueue_time:type_name -> google.protobuf.Timestamp
7, // [7:7] is the sub-list for method output_type
7, // [7:7] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_asynq_proto_init() }
func file_asynq_proto_init() {
if File_asynq_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_asynq_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TaskMessage); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_asynq_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ServerInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_asynq_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*WorkerInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_asynq_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SchedulerEntry); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_asynq_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SchedulerEnqueueEvent); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_asynq_proto_rawDesc,
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_asynq_proto_goTypes,
DependencyIndexes: file_asynq_proto_depIdxs,
MessageInfos: file_asynq_proto_msgTypes,
}.Build()
File_asynq_proto = out.File
file_asynq_proto_rawDesc = nil
file_asynq_proto_goTypes = nil
file_asynq_proto_depIdxs = nil
}

163
internal/proto/asynq.proto Normal file
View File

@@ -0,0 +1,163 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
syntax = "proto3";
package asynq;
import "google/protobuf/timestamp.proto";
option go_package = "github.com/hibiken/asynq/internal/proto";
// TaskMessage is the internal representation of a task with additional
// metadata fields.
message TaskMessage {
// Type indicates the kind of the task to be performed.
string type = 1;
// Payload holds data needed to process the task.
bytes payload = 2;
// Unique identifier for the task.
string id = 3;
// Name of the queue to which this task belongs.
string queue = 4;
// Max number of retries for this task.
int32 retry = 5;
// Number of times this task has been retried so far.
int32 retried = 6;
// Error message from the last failure.
string error_msg = 7;
// Time of last failure in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// Use zero to indicate no last failure.
int64 last_failed_at = 11;
// Timeout specifies timeout in seconds.
// Use zero to indicate no timeout.
int64 timeout = 8;
// Deadline specifies the deadline for the task in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// Use zero to indicate no deadline.
int64 deadline = 9;
// UniqueKey holds the redis key used for uniqueness lock for this task.
// Empty string indicates that no uniqueness lock was used.
string unique_key = 10;
// Retention period specified in a number of seconds.
// The task will be stored in redis as a completed task until the TTL
// expires.
int64 retention = 12;
// Time when the task completed successfully, in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// This field is populated if result_ttl > 0 upon completion.
int64 completed_at = 13;
};
// ServerInfo holds information about a running server.
message ServerInfo {
// Host machine the server is running on.
string host = 1;
// PID of the server process.
int32 pid = 2;
// Unique identifier for this server.
string server_id = 3;
// Maximum number of concurrent workers this server will use.
int32 concurrency = 4;
// List of queue names with their priorities.
// The server will consume tasks from the queues and prioritize
// queues with higher priority numbers.
map<string, int32> queues = 5;
// If set, the server will always consume tasks from a queue with higher
// priority.
bool strict_priority = 6;
// Status indicates the status of the server.
string status = 7;
// Time this server was started.
google.protobuf.Timestamp start_time = 8;
// Number of workers currently processing tasks.
int32 active_worker_count = 9;
};
// WorkerInfo holds information about a running worker.
message WorkerInfo {
// Host machine this worker is running on.
string host = 1;
// PID of the process in which this worker is running.
int32 pid = 2;
// ID of the server in which this worker is running.
string server_id = 3;
// ID of the task this worker is processing.
string task_id = 4;
// Type of the task this worker is processing.
string task_type = 5;
// Payload of the task this worker is processing.
bytes task_payload = 6;
// Name of the queue to which the task the worker is processing belongs.
string queue = 7;
// Time this worker started processing the task.
google.protobuf.Timestamp start_time = 8;
// Deadline by which the worker needs to complete processing
// the task. If worker exceeds the deadline, the task will fail.
google.protobuf.Timestamp deadline = 9;
};
// SchedulerEntry holds information about a periodic task registered
// with a scheduler.
message SchedulerEntry {
// Identifier of the scheduler entry.
string id = 1;
// Periodic schedule spec of the entry.
string spec = 2;
// Task type of the periodic task.
string task_type = 3;
// Task payload of the periodic task.
bytes task_payload = 4;
// Options used to enqueue the periodic task.
repeated string enqueue_options = 5;
// Next time the task will be enqueued.
google.protobuf.Timestamp next_enqueue_time = 6;
// Last time the task was enqueued.
// Zero time if task was never enqueued.
google.protobuf.Timestamp prev_enqueue_time = 7;
};
// SchedulerEnqueueEvent holds information about an enqueue event
// by a scheduler.
message SchedulerEnqueueEvent {
// ID of the task that was enqueued.
string task_id = 1;
// Time the task was enqueued.
google.protobuf.Timestamp enqueue_time = 2;
};
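
The internal task state defined above is serialized with Protocol Buffers before being written to Redis. For reference, a round-trip through the generated types looks roughly like this; it is a sketch only (the generated package lives under internal/, so it is importable only from asynq's own packages) and the field values are illustrative:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"

	pb "github.com/hibiken/asynq/internal/proto"
)

func main() {
	msg := &pb.TaskMessage{
		Type:    "email:welcome",
		Payload: []byte(`{"user_id":42}`),
		Id:      "illustrative-task-id",
		Queue:   "default",
		Retry:   25,
		Timeout: 60, // seconds; zero means no timeout
	}

	// Marshal to the wire-format bytes that asynq stores in Redis.
	data, err := proto.Marshal(msg)
	if err != nil {
		log.Fatal(err)
	}

	var decoded pb.TaskMessage
	if err := proto.Unmarshal(data, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetType(), decoded.GetQueue(), decoded.GetRetry())
}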

View File

@@ -5,6 +5,7 @@
 package rdb
 import (
+	"context"
 	"fmt"
 	"testing"
 	"time"
@@ -15,6 +16,7 @@ import (
 func BenchmarkEnqueue(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	msg := asynqtest.NewTaskMessage("task1", nil)
 	b.ResetTimer()
@@ -23,7 +25,7 @@ func BenchmarkEnqueue(b *testing.B) {
 		asynqtest.FlushDB(b, r.client)
 		b.StartTimer()
-		if err := r.Enqueue(msg); err != nil {
+		if err := r.Enqueue(ctx, msg); err != nil {
 			b.Fatalf("Enqueue failed: %v", err)
 		}
 	}
@@ -31,6 +33,7 @@
 func BenchmarkEnqueueUnique(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	msg := &base.TaskMessage{
 		Type:    "task1",
 		Payload: nil,
@@ -45,7 +48,7 @@ func BenchmarkEnqueueUnique(b *testing.B) {
 		asynqtest.FlushDB(b, r.client)
 		b.StartTimer()
-		if err := r.EnqueueUnique(msg, uniqueTTL); err != nil {
+		if err := r.EnqueueUnique(ctx, msg, uniqueTTL); err != nil {
 			b.Fatalf("EnqueueUnique failed: %v", err)
 		}
 	}
@@ -53,6 +56,7 @@
 func BenchmarkSchedule(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	msg := asynqtest.NewTaskMessage("task1", nil)
 	processAt := time.Now().Add(3 * time.Minute)
 	b.ResetTimer()
@@ -62,7 +66,7 @@ func BenchmarkSchedule(b *testing.B) {
 		asynqtest.FlushDB(b, r.client)
 		b.StartTimer()
-		if err := r.Schedule(msg, processAt); err != nil {
+		if err := r.Schedule(ctx, msg, processAt); err != nil {
 			b.Fatalf("Schedule failed: %v", err)
 		}
 	}
@@ -70,6 +74,7 @@
 func BenchmarkScheduleUnique(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	msg := &base.TaskMessage{
 		Type:    "task1",
 		Payload: nil,
@@ -85,7 +90,7 @@ func BenchmarkScheduleUnique(b *testing.B) {
 		asynqtest.FlushDB(b, r.client)
 		b.StartTimer()
-		if err := r.ScheduleUnique(msg, processAt, uniqueTTL); err != nil {
+		if err := r.ScheduleUnique(ctx, msg, processAt, uniqueTTL); err != nil {
 			b.Fatalf("EnqueueUnique failed: %v", err)
 		}
 	}
@@ -93,6 +98,7 @@
 func BenchmarkDequeueSingleQueue(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -101,7 +107,7 @@ func BenchmarkDequeueSingleQueue(b *testing.B) {
 		for i := 0; i < 10; i++ {
 			m := asynqtest.NewTaskMessageWithQueue(
 				fmt.Sprintf("task%d", i), nil, base.DefaultQueueName)
-			if err := r.Enqueue(m); err != nil {
+			if err := r.Enqueue(ctx, m); err != nil {
 				b.Fatalf("Enqueue failed: %v", err)
 			}
 		}
@@ -116,6 +122,7 @@
 func BenchmarkDequeueMultipleQueues(b *testing.B) {
 	qnames := []string{"critical", "default", "low"}
 	r := setup(b)
+	ctx := context.Background()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -125,7 +132,7 @@ func BenchmarkDequeueMultipleQueues(b *testing.B) {
 		for _, qname := range qnames {
 			m := asynqtest.NewTaskMessageWithQueue(
 				fmt.Sprintf("%s_task%d", qname, i), nil, qname)
-			if err := r.Enqueue(m); err != nil {
+			if err := r.Enqueue(ctx, m); err != nil {
 				b.Fatalf("Enqueue failed: %v", err)
 			}
 		}
@@ -149,16 +156,17 @@ func BenchmarkDone(b *testing.B) {
 		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
 		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
 	}
+	ctx := context.Background()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 		asynqtest.FlushDB(b, r.client)
 		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
-		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()
-		if err := r.Done(msgs[0]); err != nil {
+		if err := r.Done(ctx, msgs[0]); err != nil {
 			b.Fatalf("Done failed: %v", err)
 		}
 	}
@@ -175,16 +183,17 @@ func BenchmarkRetry(b *testing.B) {
 		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
 		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
 	}
+	ctx := context.Background()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 		asynqtest.FlushDB(b, r.client)
 		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
-		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()
-		if err := r.Retry(msgs[0], time.Now().Add(1*time.Minute), "error"); err != nil {
+		if err := r.Retry(ctx, msgs[0], time.Now().Add(1*time.Minute), "error", true /*isFailure*/); err != nil {
 			b.Fatalf("Retry failed: %v", err)
 		}
 	}
@@ -201,16 +210,17 @@ func BenchmarkArchive(b *testing.B) {
 		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
 		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
 	}
+	ctx := context.Background()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 		asynqtest.FlushDB(b, r.client)
 		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
-		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()
-		if err := r.Archive(msgs[0], "error"); err != nil {
+		if err := r.Archive(ctx, msgs[0], "error"); err != nil {
 			b.Fatalf("Archive failed: %v", err)
 		}
 	}
@@ -227,16 +237,17 @@ func BenchmarkRequeue(b *testing.B) {
 		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
 		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
 	}
+	ctx := context.Background()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 		asynqtest.FlushDB(b, r.client)
 		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
-		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()
-		if err := r.Requeue(msgs[0]); err != nil {
+		if err := r.Requeue(ctx, msgs[0]); err != nil {
 			b.Fatalf("Requeue failed: %v", err)
 		}
 	}
@@ -259,8 +270,8 @@ func BenchmarkCheckAndEnqueue(b *testing.B) {
 		asynqtest.SeedScheduledQueue(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()
-		if err := r.CheckAndEnqueue(base.DefaultQueueName); err != nil {
-			b.Fatalf("CheckAndEnqueue failed: %v", err)
+		if err := r.ForwardIfReady(base.DefaultQueueName); err != nil {
+			b.Fatalf("ForwardIfReady failed: %v", err)
 		}
 	}
 }
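
The benchmarks above now seed a lease set (SeedLease) where they previously seeded a deadlines set. Conceptually, each active task's ID becomes a member of a per-queue sorted set whose score is the lease expiration in Unix time; the heartbeat pushes that score forward while the worker is alive, and tasks whose score falls behind a cutoff are treated as lease-expired. The sketch below shows that expiration query with go-redis v8; the key name is an assumption made for illustration (the real code builds it with base.LeaseKey and goes through the RDB methods rather than raw commands):

package main

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Assumed key layout for illustration only.
	leaseKey := "asynq:{default}:lease"

	cutoff := time.Now()
	expired, err := client.ZRangeByScore(ctx, leaseKey, &redis.ZRangeBy{
		Min: "-inf",
		Max: strconv.FormatInt(cutoff.Unix(), 10), // lease scores are Unix timestamps
	}).Result()
	if err != nil {
		panic(err)
	}
	// Task IDs returned here would be candidates for recovery (retry or archive).
	fmt.Println("expired leases:", expired)
}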

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -6,11 +6,12 @@
 package testbroker
 import (
+	"context"
 	"errors"
 	"sync"
 	"time"
-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
 	"github.com/hibiken/asynq/internal/base"
 )
@@ -45,22 +46,22 @@ func (tb *TestBroker) Wakeup() {
 	tb.sleeping = false
 }
-func (tb *TestBroker) Enqueue(msg *base.TaskMessage) error {
+func (tb *TestBroker) Enqueue(ctx context.Context, msg *base.TaskMessage) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Enqueue(msg)
+	return tb.real.Enqueue(ctx, msg)
 }
-func (tb *TestBroker) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
+func (tb *TestBroker) EnqueueUnique(ctx context.Context, msg *base.TaskMessage, ttl time.Duration) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.EnqueueUnique(msg, ttl)
+	return tb.real.EnqueueUnique(ctx, msg, ttl)
 }
 func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, error) {
@@ -72,76 +73,103 @@ func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, e
 	return tb.real.Dequeue(qnames...)
 }
-func (tb *TestBroker) Done(msg *base.TaskMessage) error {
+func (tb *TestBroker) Done(ctx context.Context, msg *base.TaskMessage) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Done(msg)
+	return tb.real.Done(ctx, msg)
 }
-func (tb *TestBroker) Requeue(msg *base.TaskMessage) error {
+func (tb *TestBroker) MarkAsComplete(ctx context.Context, msg *base.TaskMessage) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Requeue(msg)
+	return tb.real.MarkAsComplete(ctx, msg)
 }
-func (tb *TestBroker) Schedule(msg *base.TaskMessage, processAt time.Time) error {
+func (tb *TestBroker) Requeue(ctx context.Context, msg *base.TaskMessage) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Schedule(msg, processAt)
+	return tb.real.Requeue(ctx, msg)
 }
-func (tb *TestBroker) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
+func (tb *TestBroker) Schedule(ctx context.Context, msg *base.TaskMessage, processAt time.Time) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.ScheduleUnique(msg, processAt, ttl)
+	return tb.real.Schedule(ctx, msg, processAt)
 }
-func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
+func (tb *TestBroker) ScheduleUnique(ctx context.Context, msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Retry(msg, processAt, errMsg)
+	return tb.real.ScheduleUnique(ctx, msg, processAt, ttl)
 }
-func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error {
+func (tb *TestBroker) Retry(ctx context.Context, msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Archive(msg, errMsg)
+	return tb.real.Retry(ctx, msg, processAt, errMsg, isFailure)
 }
-func (tb *TestBroker) CheckAndEnqueue(qnames ...string) error {
+func (tb *TestBroker) Archive(ctx context.Context, msg *base.TaskMessage, errMsg string) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.CheckAndEnqueue(qnames...)
+	return tb.real.Archive(ctx, msg, errMsg)
 }
-func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
+func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return errRedisDown
+	}
+	return tb.real.ForwardIfReady(qnames...)
+}
+func (tb *TestBroker) DeleteExpiredCompletedTasks(qname string) error {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return errRedisDown
+	}
+	return tb.real.DeleteExpiredCompletedTasks(qname)
+}
+func (tb *TestBroker) ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*base.TaskMessage, error) {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return nil, errRedisDown
 	}
-	return tb.real.ListDeadlineExceeded(deadline, qnames...)
+	return tb.real.ListLeaseExpired(cutoff, qnames...)
+}
+func (tb *TestBroker) ExtendLease(qname string, ids ...string) (time.Time, error) {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return time.Time{}, errRedisDown
+	}
+	return tb.real.ExtendLease(qname, ids...)
 }
 func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
@@ -180,6 +208,15 @@ func (tb *TestBroker) PublishCancelation(id string) error {
 	return tb.real.PublishCancelation(id)
 }
+func (tb *TestBroker) WriteResult(qname, id string, data []byte) (int, error) {
+	tb.mu.Lock()
+	defer tb.mu.Unlock()
+	if tb.sleeping {
+		return 0, errRedisDown
+	}
+	return tb.real.WriteResult(qname, id, data)
+}
 func (tb *TestBroker) Ping() error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
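
TestBroker wraps a real broker and lets a test flip simulated Redis availability on and off. A test against the updated, context-aware Broker interface might look roughly like the sketch below; NewTestBroker and Sleep are assumed counterparts of the Wakeup method shown above, the Redis address is illustrative, and the internal packages are importable only within the asynq module itself:

package example_test

import (
	"context"
	"testing"

	"github.com/go-redis/redis/v8"
	"github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/rdb"
	"github.com/hibiken/asynq/internal/testbroker"
)

func TestEnqueueWhileBrokerIsDown(t *testing.T) {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 15})
	tb := testbroker.NewTestBroker(rdb.NewRDB(client)) // NewTestBroker is assumed here
	msg := asynqtest.NewTaskMessage("task1", nil)
	ctx := context.Background()

	tb.Sleep() // simulate Redis becoming unreachable (assumed counterpart of Wakeup)
	if err := tb.Enqueue(ctx, msg); err == nil {
		t.Error("Enqueue succeeded; want an error while the broker is down")
	}

	tb.Wakeup() // bring the broker back up
	if err := tb.Enqueue(ctx, msg); err != nil {
		t.Errorf("Enqueue failed after Wakeup: %v", err)
	}
}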

View File

@@ -0,0 +1,59 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Package timeutil exports functions and types related to time and date.
package timeutil
import (
"sync"
"time"
)
// A Clock is an object that can tell you the current time.
//
// This interface allows decoupling code that uses time from the code that creates
// a point in time. You can use this to your advantage by injecting Clocks into interfaces
// rather than having implementations call time.Now() directly.
//
// Use NewRealClock() in production.
// Use NewSimulatedClock() in tests.
type Clock interface {
Now() time.Time
}
func NewRealClock() Clock { return &realTimeClock{} }
type realTimeClock struct{}
func (_ *realTimeClock) Now() time.Time { return time.Now() }
// A SimulatedClock is a concrete Clock implementation that doesn't "tick" on its own.
// Time is advanced by explicit call to the AdvanceTime() or SetTime() functions.
// This object is concurrency safe.
type SimulatedClock struct {
mu sync.Mutex
t time.Time // guarded by mu
}
func NewSimulatedClock(t time.Time) *SimulatedClock {
return &SimulatedClock{t: t}
}
func (c *SimulatedClock) Now() time.Time {
c.mu.Lock()
defer c.mu.Unlock()
return c.t
}
func (c *SimulatedClock) SetTime(t time.Time) {
c.mu.Lock()
defer c.mu.Unlock()
c.t = t
}
func (c *SimulatedClock) AdvanceTime(d time.Duration) {
c.mu.Lock()
defer c.mu.Unlock()
c.t = c.t.Add(d)
}
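
Because components receive a Clock rather than calling time.Now() directly, tests can drive time explicitly. A minimal sketch of that pattern using only the API above; leaseChecker is an illustrative consumer, not a type from this patch:

package main

import (
	"fmt"
	"time"

	"github.com/hibiken/asynq/internal/timeutil"
)

// leaseChecker is an illustrative consumer that depends on a Clock
// instead of calling time.Now() itself.
type leaseChecker struct {
	clock    timeutil.Clock
	expireAt time.Time
}

func (c *leaseChecker) expired() bool { return c.clock.Now().After(c.expireAt) }

func main() {
	start := time.Now()
	clock := timeutil.NewSimulatedClock(start)
	checker := &leaseChecker{clock: clock, expireAt: start.Add(30 * time.Second)}

	fmt.Println(checker.expired()) // false: simulated time has not advanced

	clock.AdvanceTime(time.Minute) // move time forward without sleeping
	fmt.Println(checker.expired()) // true
}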

View File

@@ -0,0 +1,48 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package timeutil
import (
"testing"
"time"
)
func TestSimulatedClock(t *testing.T) {
now := time.Now()
tests := []struct {
desc string
initTime time.Time
advanceBy time.Duration
wantTime time.Time
}{
{
desc: "advance time forward",
initTime: now,
advanceBy: 30 * time.Second,
wantTime: now.Add(30 * time.Second),
},
{
desc: "advance time backward",
initTime: now,
advanceBy: -10 * time.Second,
wantTime: now.Add(-10 * time.Second),
},
}
for _, tc := range tests {
c := NewSimulatedClock(tc.initTime)
if c.Now() != tc.initTime {
t.Errorf("%s: Before Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.initTime)
}
c.AdvanceTime(tc.advanceBy)
if c.Now() != tc.wantTime {
t.Errorf("%s: After Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.wantTime)
}
}
}

81
janitor.go Normal file
View File

@@ -0,0 +1,81 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// A janitor is responsible for deleting expired completed tasks from the specified
// queues. It periodically checks for any expired tasks in the completed set, and
// deletes them.
type janitor struct {
logger *log.Logger
broker base.Broker
// channel to communicate back to the long running "janitor" goroutine.
done chan struct{}
// list of queue names to check.
queues []string
// average interval between checks.
avgInterval time.Duration
}
type janitorParams struct {
logger *log.Logger
broker base.Broker
queues []string
interval time.Duration
}
func newJanitor(params janitorParams) *janitor {
return &janitor{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
queues: params.queues,
avgInterval: params.interval,
}
}
func (j *janitor) shutdown() {
j.logger.Debug("Janitor shutting down...")
// Signal the janitor goroutine to stop.
j.done <- struct{}{}
}
// start starts the "janitor" goroutine.
func (j *janitor) start(wg *sync.WaitGroup) {
wg.Add(1)
timer := time.NewTimer(j.avgInterval) // randomize this interval with margin of 1s
go func() {
defer wg.Done()
for {
select {
case <-j.done:
j.logger.Debug("Janitor done")
return
case <-timer.C:
j.exec()
timer.Reset(j.avgInterval)
}
}
}()
}
func (j *janitor) exec() {
for _, qname := range j.queues {
if err := j.broker.DeleteExpiredCompletedTasks(qname); err != nil {
j.logger.Errorf("Failed to delete expired completed tasks from queue %q: %v",
qname, err)
}
}
}
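
The janitor only has work to do when completed tasks are retained. A task lands in the completed set when it is enqueued with a retention period; the sketch below assumes the public Retention enqueue option that corresponds to the retention field in TaskMessage above, and the Redis address is illustrative:

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("email:welcome", []byte(`{"user_id":42}`))

	// Keep the task in the completed set for 24 hours after it succeeds.
	// Once that period passes, the janitor's periodic DeleteExpiredCompletedTasks
	// call removes it.
	info, err := client.Enqueue(task, asynq.Retention(24*time.Hour))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("enqueued task id=%s queue=%s", info.ID, info.Queue)
}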

89
janitor_test.go Normal file
View File

@@ -0,0 +1,89 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
)
func newCompletedTask(qname, tasktype string, payload []byte, completedAt time.Time) *base.TaskMessage {
msg := h.NewTaskMessageWithQueue(tasktype, payload, qname)
msg.CompletedAt = completedAt.Unix()
return msg
}
func TestJanitor(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
const interval = 1 * time.Second
janitor := newJanitor(janitorParams{
logger: testLogger,
broker: rdbClient,
queues: []string{"default", "custom"},
interval: interval,
})
now := time.Now()
hourAgo := now.Add(-1 * time.Hour)
minuteAgo := now.Add(-1 * time.Minute)
halfHourAgo := now.Add(-30 * time.Minute)
halfHourFromNow := now.Add(30 * time.Minute)
fiveMinFromNow := now.Add(5 * time.Minute)
msg1 := newCompletedTask("default", "task1", nil, hourAgo)
msg2 := newCompletedTask("default", "task2", nil, minuteAgo)
msg3 := newCompletedTask("custom", "task3", nil, hourAgo)
msg4 := newCompletedTask("custom", "task4", nil, minuteAgo)
tests := []struct {
completed map[string][]base.Z // initial completed sets
wantCompleted map[string][]base.Z // expected completed sets after janitor runs
}{
{
completed: map[string][]base.Z{
"default": {
{Message: msg1, Score: halfHourAgo.Unix()},
{Message: msg2, Score: fiveMinFromNow.Unix()},
},
"custom": {
{Message: msg3, Score: halfHourFromNow.Unix()},
{Message: msg4, Score: minuteAgo.Unix()},
},
},
wantCompleted: map[string][]base.Z{
"default": {
{Message: msg2, Score: fiveMinFromNow.Unix()},
},
"custom": {
{Message: msg3, Score: halfHourFromNow.Unix()},
},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)
var wg sync.WaitGroup
janitor.start(&wg)
time.Sleep(2 * interval) // make sure to let janitor run at least one time
janitor.shutdown()
for qname, want := range tc.wantCompleted {
got := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, got, h.SortZSetEntryOpt); diff != "" {
t.Errorf("diff found in %q after running janitor: (-want, +got)\n%s", base.CompletedKey(qname), diff)
}
}
}
}

View File

@@ -1,230 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"encoding/json"
"fmt"
"time"
"github.com/spf13/cast"
)
// Payload holds arbitrary data needed for task execution.
type Payload struct {
data map[string]interface{}
}
type errKeyNotFound struct {
key string
}
func (e *errKeyNotFound) Error() string {
return fmt.Sprintf("key %q does not exist", e.key)
}
// Has reports whether key exists.
func (p Payload) Has(key string) bool {
_, ok := p.data[key]
return ok
}
func toInt(v interface{}) (int, error) {
switch v := v.(type) {
case json.Number:
val, err := v.Int64()
if err != nil {
return 0, err
}
return int(val), nil
default:
return cast.ToIntE(v)
}
}
// String returns a string representation of payload data.
func (p Payload) String() string {
return fmt.Sprint(p.data)
}
// MarshalJSON returns the JSON encoding of payload data.
func (p Payload) MarshalJSON() ([]byte, error) {
return json.Marshal(p.data)
}
// GetString returns a string value if a string type is associated with
// the key, otherwise reports an error.
func (p Payload) GetString(key string) (string, error) {
v, ok := p.data[key]
if !ok {
return "", &errKeyNotFound{key}
}
return cast.ToStringE(v)
}
// GetInt returns an int value if a numeric type is associated with
// the key, otherwise reports an error.
func (p Payload) GetInt(key string) (int, error) {
v, ok := p.data[key]
if !ok {
return 0, &errKeyNotFound{key}
}
return toInt(v)
}
// GetFloat64 returns a float64 value if a numeric type is associated with
// the key, otherwise reports an error.
func (p Payload) GetFloat64(key string) (float64, error) {
v, ok := p.data[key]
if !ok {
return 0, &errKeyNotFound{key}
}
switch v := v.(type) {
case json.Number:
return v.Float64()
default:
return cast.ToFloat64E(v)
}
}
// GetBool returns a boolean value if a boolean type is associated with
// the key, otherwise reports an error.
func (p Payload) GetBool(key string) (bool, error) {
v, ok := p.data[key]
if !ok {
return false, &errKeyNotFound{key}
}
return cast.ToBoolE(v)
}
// GetStringSlice returns a slice of strings if a string slice type is associated with
// the key, otherwise reports an error.
func (p Payload) GetStringSlice(key string) ([]string, error) {
v, ok := p.data[key]
if !ok {
return nil, &errKeyNotFound{key}
}
return cast.ToStringSliceE(v)
}
// GetIntSlice returns a slice of ints if a int slice type is associated with
// the key, otherwise reports an error.
func (p Payload) GetIntSlice(key string) ([]int, error) {
v, ok := p.data[key]
if !ok {
return nil, &errKeyNotFound{key}
}
switch v := v.(type) {
case []interface{}:
var res []int
for _, elem := range v {
val, err := toInt(elem)
if err != nil {
return nil, err
}
res = append(res, int(val))
}
return res, nil
default:
return cast.ToIntSliceE(v)
}
}
// GetStringMap returns a map of string to empty interface
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMap(key string) (map[string]interface{}, error) {
v, ok := p.data[key]
if !ok {
return nil, &errKeyNotFound{key}
}
return cast.ToStringMapE(v)
}
// GetStringMapString returns a map of string to string
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapString(key string) (map[string]string, error) {
v, ok := p.data[key]
if !ok {
return nil, &errKeyNotFound{key}
}
return cast.ToStringMapStringE(v)
}
// GetStringMapStringSlice returns a map of string to string slice
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapStringSlice(key string) (map[string][]string, error) {
v, ok := p.data[key]
if !ok {
return nil, &errKeyNotFound{key}
}
return cast.ToStringMapStringSliceE(v)
}
// GetStringMapInt returns a map of string to int
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapInt(key string) (map[string]int, error) {
v, ok := p.data[key]
if !ok {
return nil, &errKeyNotFound{key}
}
switch v := v.(type) {
case map[string]interface{}:
res := make(map[string]int)
for key, val := range v {
ival, err := toInt(val)
if err != nil {
return nil, err
}
res[key] = ival
}
return res, nil
default:
return cast.ToStringMapIntE(v)
}
}
// GetStringMapBool returns a map of string to boolean
// if a correct map type is associated with the key,
// otherwise reports an error.
func (p Payload) GetStringMapBool(key string) (map[string]bool, error) {
v, ok := p.data[key]
if !ok {
return nil, &errKeyNotFound{key}
}
return cast.ToStringMapBoolE(v)
}
// GetTime returns a time value if a time type is associated with the key,
// otherwise reports an error.
func (p Payload) GetTime(key string) (time.Time, error) {
v, ok := p.data[key]
if !ok {
return time.Time{}, &errKeyNotFound{key}
}
return cast.ToTimeE(v)
}
// GetDuration returns a duration value if a duration type is associated with the key,
// otherwise reports an error.
func (p Payload) GetDuration(key string) (time.Duration, error) {
v, ok := p.data[key]
if !ok {
return 0, &errKeyNotFound{key}
}
switch v := v.(type) {
case json.Number:
val, err := v.Int64()
if err != nil {
return 0, err
}
return time.Duration(val), nil
default:
return cast.ToDurationE(v)
}
}
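
The json.Number branches in the getters above exist because a task's payload map goes through a JSON encode/decode round trip before these getters see it, so numeric values come back as json.Number rather than their original Go types (this assumes the message decoder is configured with UseNumber, which the branches imply; the tests below exercise exactly this round trip via base.EncodeMessage/DecodeMessage). A minimal standalone sketch of that behavior, using only the standard library and illustrative names:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Encode a payload-like map, then decode it the way a decoder with
	// UseNumber would: numbers come back as json.Number.
	raw, err := json.Marshal(map[string]interface{}{"duration": 15 * time.Minute})
	if err != nil {
		panic(err)
	}
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.UseNumber()

	var data map[string]interface{}
	if err := dec.Decode(&data); err != nil {
		panic(err)
	}

	// The dynamic type is now json.Number; convert it back explicitly,
	// mirroring what GetDuration does in its json.Number case.
	n := data["duration"].(json.Number)
	i64, err := n.Int64()
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(i64)) // prints "15m0s"
}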

View File

@@ -1,675 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"encoding/json"
"fmt"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base"
)
type payloadTest struct {
data map[string]interface{}
key string
nonkey string
}
func TestPayloadString(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"name": "gopher"},
key: "name",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetString(tc.key)
if err != nil || got != tc.data[tc.key] {
t.Errorf("Payload.GetString(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetString(tc.key)
if err != nil || got != tc.data[tc.key] {
t.Errorf("With Marshaling: Payload.GetString(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetString(tc.nonkey)
if err == nil || got != "" {
t.Errorf("Payload.GetString(%q) = %v, %v; want '', error",
tc.key, got, err)
}
}
}
func TestPayloadInt(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"user_id": 42},
key: "user_id",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetInt(tc.key)
if err != nil || got != tc.data[tc.key] {
t.Errorf("Payload.GetInt(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetInt(tc.key)
if err != nil || got != tc.data[tc.key] {
t.Errorf("With Marshaling: Payload.GetInt(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetInt(tc.nonkey)
if err == nil || got != 0 {
t.Errorf("Payload.GetInt(%q) = %v, %v; want 0, error",
tc.key, got, err)
}
}
}
func TestPayloadFloat64(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"pi": 3.14},
key: "pi",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetFloat64(tc.key)
if err != nil || got != tc.data[tc.key] {
t.Errorf("Payload.GetFloat64(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetFloat64(tc.key)
if err != nil || got != tc.data[tc.key] {
t.Errorf("With Marshaling: Payload.GetFloat64(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetFloat64(tc.nonkey)
if err == nil || got != 0 {
t.Errorf("Payload.GetFloat64(%q) = %v, %v; want 0, error",
tc.key, got, err)
}
}
}
func TestPayloadBool(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"enabled": true},
key: "enabled",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetBool(tc.key)
if err != nil || got != tc.data[tc.key] {
t.Errorf("Payload.GetBool(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetBool(tc.key)
if err != nil || got != tc.data[tc.key] {
t.Errorf("With Marshaling: Payload.GetBool(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetBool(tc.nonkey)
if err == nil || got != false {
t.Errorf("Payload.GetBool(%q) = %v, %v; want false, error",
tc.key, got, err)
}
}
}
func TestPayloadStringSlice(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"names": []string{"luke", "rey", "anakin"}},
key: "names",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetStringSlice(tc.key)
diff := cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("Payload.GetStringSlice(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetStringSlice(tc.key)
diff = cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("With Marshaling: Payload.GetStringSlice(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetStringSlice(tc.nonkey)
if err == nil || got != nil {
t.Errorf("Payload.GetStringSlice(%q) = %v, %v; want nil, error",
tc.key, got, err)
}
}
}
func TestPayloadIntSlice(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"nums": []int{9, 8, 7}},
key: "nums",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetIntSlice(tc.key)
diff := cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("Payload.GetIntSlice(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetIntSlice(tc.key)
diff = cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("With Marshaling: Payload.GetIntSlice(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetIntSlice(tc.nonkey)
if err == nil || got != nil {
t.Errorf("Payload.GetIntSlice(%q) = %v, %v; want nil, error",
tc.key, got, err)
}
}
}
func TestPayloadStringMap(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"user": map[string]interface{}{"name": "Jon Doe", "score": 2.2}},
key: "user",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetStringMap(tc.key)
diff := cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("Payload.GetStringMap(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetStringMap(tc.key)
ignoreOpt := cmpopts.IgnoreMapEntries(func(key string, val interface{}) bool {
switch val.(type) {
case json.Number:
return true
default:
return false
}
})
diff = cmp.Diff(got, tc.data[tc.key], ignoreOpt)
if err != nil || diff != "" {
t.Errorf("With Marshaling: Payload.GetStringMap(%q) = %v, %v, want %v, nil;(-want,+got)\n%s",
tc.key, got, err, tc.data[tc.key], diff)
}
// access non-existent key.
got, err = payload.GetStringMap(tc.nonkey)
if err == nil || got != nil {
t.Errorf("Payload.GetStringMap(%q) = %v, %v; want nil, error",
tc.key, got, err)
}
}
}
func TestPayloadStringMapString(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"address": map[string]string{"line": "123 Main St", "city": "San Francisco", "state": "CA"}},
key: "address",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetStringMapString(tc.key)
diff := cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("Payload.GetStringMapString(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetStringMapString(tc.key)
diff = cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("With Marshaling: Payload.GetStringMapString(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetStringMapString(tc.nonkey)
if err == nil || got != nil {
t.Errorf("Payload.GetStringMapString(%q) = %v, %v; want nil, error",
tc.key, got, err)
}
}
}
func TestPayloadStringMapStringSlice(t *testing.T) {
favs := map[string][]string{
"movies": {"forrest gump", "star wars"},
"tv_shows": {"game of thrones", "HIMYM", "breaking bad"},
}
tests := []payloadTest{
{
data: map[string]interface{}{"favorites": favs},
key: "favorites",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetStringMapStringSlice(tc.key)
diff := cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("Payload.GetStringMapStringSlice(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetStringMapStringSlice(tc.key)
diff = cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("With Marshaling: Payload.GetStringMapStringSlice(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetStringMapStringSlice(tc.nonkey)
if err == nil || got != nil {
t.Errorf("Payload.GetStringMapStringSlice(%q) = %v, %v; want nil, error",
tc.key, got, err)
}
}
}
func TestPayloadStringMapInt(t *testing.T) {
counter := map[string]int{
"a": 1,
"b": 101,
"c": 42,
}
tests := []payloadTest{
{
data: map[string]interface{}{"counts": counter},
key: "counts",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetStringMapInt(tc.key)
diff := cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("Payload.GetStringMapInt(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetStringMapInt(tc.key)
diff = cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("With Marshaling: Payload.GetStringMapInt(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetStringMapInt(tc.nonkey)
if err == nil || got != nil {
t.Errorf("Payload.GetStringMapInt(%q) = %v, %v; want nil, error",
tc.key, got, err)
}
}
}
func TestPayloadStringMapBool(t *testing.T) {
features := map[string]bool{
"A": false,
"B": true,
"C": true,
}
tests := []payloadTest{
{
data: map[string]interface{}{"features": features},
key: "features",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetStringMapBool(tc.key)
diff := cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("Payload.GetStringMapBool(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetStringMapBool(tc.key)
diff = cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("With Marshaling: Payload.GetStringMapBool(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetStringMapBool(tc.nonkey)
if err == nil || got != nil {
t.Errorf("Payload.GetStringMapBool(%q) = %v, %v; want nil, error",
tc.key, got, err)
}
}
}
func TestPayloadTime(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"current": time.Now()},
key: "current",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetTime(tc.key)
diff := cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("Payload.GetTime(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetTime(tc.key)
diff = cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("With Marshaling: Payload.GetTime(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetTime(tc.nonkey)
if err == nil || !got.IsZero() {
t.Errorf("Payload.GetTime(%q) = %v, %v; want %v, error",
tc.key, got, err, time.Time{})
}
}
}
func TestPayloadDuration(t *testing.T) {
tests := []payloadTest{
{
data: map[string]interface{}{"duration": 15 * time.Minute},
key: "duration",
nonkey: "unknown",
},
}
for _, tc := range tests {
payload := Payload{tc.data}
got, err := payload.GetDuration(tc.key)
diff := cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("Payload.GetDuration(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// encode and then decode task message.
in := h.NewTaskMessage("testing", tc.data)
encoded, err := base.EncodeMessage(in)
if err != nil {
t.Fatal(err)
}
out, err := base.DecodeMessage(encoded)
if err != nil {
t.Fatal(err)
}
payload = Payload{out.Payload}
got, err = payload.GetDuration(tc.key)
diff = cmp.Diff(got, tc.data[tc.key])
if err != nil || diff != "" {
t.Errorf("With Marshaling: Payload.GetDuration(%q) = %v, %v, want %v, nil",
tc.key, got, err, tc.data[tc.key])
}
// access non-existent key.
got, err = payload.GetDuration(tc.nonkey)
if err == nil || got != 0 {
t.Errorf("Payload.GetDuration(%q) = %v, %v; want %v, error",
tc.key, got, err, time.Duration(0))
}
}
}
func TestPayloadHas(t *testing.T) {
payload := Payload{map[string]interface{}{
"user_id": 123,
}}
if !payload.Has("user_id") {
t.Errorf("Payload.Has(%q) = false, want true", "user_id")
}
if payload.Has("name") {
t.Errorf("Payload.Has(%q) = true, want false", "name")
}
}
func TestPayloadDebuggingStrings(t *testing.T) {
data := map[string]interface{}{
"foo": 123,
"bar": "hello",
"baz": false,
}
payload := Payload{data: data}
if payload.String() != fmt.Sprint(data) {
t.Errorf("Payload.String() = %q, want %q",
payload.String(), fmt.Sprint(data))
}
got, err := payload.MarshalJSON()
if err != nil {
t.Fatal(err)
}
want, err := json.Marshal(data)
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("Payload.MarhsalJSON() = %s, want %s; (-want,+got)\n%s",
got, want, diff)
}
}

243
periodic_task_manager.go Normal file
View File

@@ -0,0 +1,243 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"crypto/sha256"
"fmt"
"io"
"sort"
"sync"
"time"
)
// PeriodicTaskManager manages scheduling of periodic tasks.
// It keeps the scheduler's entries in sync by periodically calling the config provider.
type PeriodicTaskManager struct {
s *Scheduler
p PeriodicTaskConfigProvider
syncInterval time.Duration
done chan struct{}
wg sync.WaitGroup
m map[string]string // map[hash]entryID
}
type PeriodicTaskManagerOpts struct {
// Required: must be non-nil
PeriodicTaskConfigProvider PeriodicTaskConfigProvider
// Required: must be non-nil
RedisConnOpt RedisConnOpt
// Optional: scheduler options
*SchedulerOpts
// Optional: default is 3m
SyncInterval time.Duration
}
const defaultSyncInterval = 3 * time.Minute
// NewPeriodicTaskManager returns a new PeriodicTaskManager instance.
// The given opts should specify the RedisConnOpt and PeriodicTaskConfigProvider at minimum.
func NewPeriodicTaskManager(opts PeriodicTaskManagerOpts) (*PeriodicTaskManager, error) {
if opts.PeriodicTaskConfigProvider == nil {
return nil, fmt.Errorf("PeriodicTaskConfigProvider cannot be nil")
}
if opts.RedisConnOpt == nil {
return nil, fmt.Errorf("RedisConnOpt cannot be nil")
}
scheduler := NewScheduler(opts.RedisConnOpt, opts.SchedulerOpts)
syncInterval := opts.SyncInterval
if syncInterval == 0 {
syncInterval = defaultSyncInterval
}
return &PeriodicTaskManager{
s: scheduler,
p: opts.PeriodicTaskConfigProvider,
syncInterval: syncInterval,
done: make(chan struct{}),
m: make(map[string]string),
}, nil
}
// PeriodicTaskConfigProvider provides configs for periodic tasks.
// GetConfigs will be called by a PeriodicTaskManager periodically to
// sync the scheduler's entries with the configs returned by the provider.
type PeriodicTaskConfigProvider interface {
GetConfigs() ([]*PeriodicTaskConfig, error)
}
// PeriodicTaskConfig specifies the details of a periodic task.
type PeriodicTaskConfig struct {
Cronspec string // required: must be a non-empty string
Task *Task // required: must be non nil
Opts []Option // optional: can be nil
}
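// hash returns a SHA256-based fingerprint of the config, computed from the cronspec,
// task type, payload, and options; options are stringified and sorted first, so the
// order in which options are passed does not affect the result.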
func (c *PeriodicTaskConfig) hash() string {
h := sha256.New()
io.WriteString(h, c.Cronspec)
io.WriteString(h, c.Task.Type())
h.Write(c.Task.Payload())
opts := stringifyOptions(c.Opts)
sort.Strings(opts)
for _, opt := range opts {
io.WriteString(h, opt)
}
return fmt.Sprintf("%x", h.Sum(nil))
}
func validatePeriodicTaskConfig(c *PeriodicTaskConfig) error {
if c == nil {
return fmt.Errorf("PeriodicTaskConfig cannot be nil")
}
if c.Task == nil {
return fmt.Errorf("PeriodicTaskConfig.Task cannot be nil")
}
if c.Cronspec == "" {
return fmt.Errorf("PeriodicTaskConfig.Cronspec cannot be empty")
}
return nil
}
// Start starts the scheduler and a background goroutine that keeps the scheduler's
// entries in sync with the configs returned by the provider.
//
// Start returns any error encountered at start-up time.
func (mgr *PeriodicTaskManager) Start() error {
if mgr.s == nil || mgr.p == nil {
panic("asynq: cannot start uninitialized PeriodicTaskManager; use NewPeriodicTaskManager to initialize")
}
if err := mgr.initialSync(); err != nil {
return fmt.Errorf("asynq: %v", err)
}
if err := mgr.s.Start(); err != nil {
return fmt.Errorf("asynq: %v", err)
}
mgr.wg.Add(1)
go func() {
defer mgr.wg.Done()
ticker := time.NewTicker(mgr.syncInterval)
for {
select {
case <-mgr.done:
mgr.s.logger.Debugf("Stopping syncer goroutine")
ticker.Stop()
return
case <-ticker.C:
mgr.sync()
}
}
}()
return nil
}
// Shutdown gracefully shuts down the manager.
// It signals the background syncer goroutine to stop and then stops the scheduler.
func (mgr *PeriodicTaskManager) Shutdown() {
close(mgr.done)
mgr.wg.Wait()
mgr.s.Shutdown()
}
// Run starts the manager and blocks until an OS signal to exit the program is received.
// Once it receives a signal, it gracefully shuts down the manager.
func (mgr *PeriodicTaskManager) Run() error {
if err := mgr.Start(); err != nil {
return err
}
mgr.s.waitForSignals()
mgr.Shutdown()
mgr.s.logger.Debugf("PeriodicTaskManager exiting")
return nil
}
func (mgr *PeriodicTaskManager) initialSync() error {
configs, err := mgr.p.GetConfigs()
if err != nil {
return fmt.Errorf("initial call to GetConfigs failed: %v", err)
}
for _, c := range configs {
if err := validatePeriodicTaskConfig(c); err != nil {
return fmt.Errorf("initial call to GetConfigs contained an invalid config: %v", err)
}
}
mgr.add(configs)
return nil
}
func (mgr *PeriodicTaskManager) add(configs []*PeriodicTaskConfig) {
for _, c := range configs {
entryID, err := mgr.s.Register(c.Cronspec, c.Task, c.Opts...)
if err != nil {
mgr.s.logger.Errorf("Failed to register periodic task: cronspec=%q task=%q",
c.Cronspec, c.Task.Type())
continue
}
mgr.m[c.hash()] = entryID
mgr.s.logger.Infof("Successfully registered periodic task: cronspec=%q task=%q, entryID=%s",
c.Cronspec, c.Task.Type(), entryID)
}
}
func (mgr *PeriodicTaskManager) remove(removed map[string]string) {
for hash, entryID := range removed {
if err := mgr.s.Unregister(entryID); err != nil {
mgr.s.logger.Errorf("Failed to unregister periodic task: %v", err)
continue
}
delete(mgr.m, hash)
mgr.s.logger.Infof("Successfully unregistered periodic task: entryID=%s", entryID)
}
}
func (mgr *PeriodicTaskManager) sync() {
configs, err := mgr.p.GetConfigs()
if err != nil {
mgr.s.logger.Errorf("Failed to get periodic task configs: %v", err)
return
}
for _, c := range configs {
if err := validatePeriodicTaskConfig(c); err != nil {
mgr.s.logger.Errorf("Failed to sync: GetConfigs returned an invalid config: %v", err)
return
}
}
// Diff and only register/unregister the newly added/removed entries.
removed := mgr.diffRemoved(configs)
added := mgr.diffAdded(configs)
mgr.remove(removed)
mgr.add(added)
}
// diffRemoved diffs the incoming configs against the registered configs and returns
// a map from hash to entryID for each config that was removed.
func (mgr *PeriodicTaskManager) diffRemoved(configs []*PeriodicTaskConfig) map[string]string {
newConfigs := make(map[string]string)
for _, c := range configs {
newConfigs[c.hash()] = "" // empty value since we don't have entryID yet
}
removed := make(map[string]string)
for k, v := range mgr.m {
// test whether existing config is present in the incoming configs
if _, found := newConfigs[k]; !found {
removed[k] = v
}
}
return removed
}
// diffAdded diffs the incoming configs with the registered configs and returns
// a list of configs that were added.
func (mgr *PeriodicTaskManager) diffAdded(configs []*PeriodicTaskConfig) []*PeriodicTaskConfig {
var added []*PeriodicTaskConfig
for _, c := range configs {
if _, found := mgr.m[c.hash()]; !found {
added = append(added, c)
}
}
return added
}
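
For orientation, here is a minimal usage sketch of the new API. The provider type, task type, cronspec, and Redis address below are illustrative placeholders, not part of this change:

package main

import (
	"log"

	"github.com/hibiken/asynq"
)

// staticProvider is a hypothetical provider that always returns the same configs;
// a real provider would typically load them from a database or config file.
type staticProvider struct{}

func (p *staticProvider) GetConfigs() ([]*asynq.PeriodicTaskConfig, error) {
	return []*asynq.PeriodicTaskConfig{
		{Cronspec: "*/5 * * * *", Task: asynq.NewTask("cleanup", nil)},
	}, nil
}

func main() {
	mgr, err := asynq.NewPeriodicTaskManager(asynq.PeriodicTaskManagerOpts{
		RedisConnOpt:               asynq.RedisClientOpt{Addr: "localhost:6379"},
		PeriodicTaskConfigProvider: &staticProvider{},
		// SyncInterval is optional and defaults to 3 minutes when left as zero.
	})
	if err != nil {
		log.Fatal(err)
	}
	// Run blocks until an OS exit signal is received, then shuts down gracefully.
	if err := mgr.Run(); err != nil {
		log.Fatal(err)
	}
}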

View File

@@ -0,0 +1,343 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sort"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
)
// Trivial implementation of PeriodicTaskConfigProvider for testing purposes.
type FakeConfigProvider struct {
mu sync.Mutex
cfgs []*PeriodicTaskConfig
}
func (p *FakeConfigProvider) SetConfigs(cfgs []*PeriodicTaskConfig) {
p.mu.Lock()
defer p.mu.Unlock()
p.cfgs = cfgs
}
func (p *FakeConfigProvider) GetConfigs() ([]*PeriodicTaskConfig, error) {
p.mu.Lock()
defer p.mu.Unlock()
return p.cfgs, nil
}
func TestNewPeriodicTaskManager(t *testing.T) {
cfgs := []*PeriodicTaskConfig{
{Cronspec: "* * * * *", Task: NewTask("foo", nil)},
{Cronspec: "* * * * *", Task: NewTask("bar", nil)},
}
tests := []struct {
desc string
opts PeriodicTaskManagerOpts
}{
{
desc: "with provider and redisConnOpt",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
},
},
{
desc: "with sync option",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
SyncInterval: 5 * time.Minute,
},
},
{
desc: "with scheduler option",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
SyncInterval: 5 * time.Minute,
SchedulerOpts: &SchedulerOpts{
LogLevel: DebugLevel,
},
},
},
}
for _, tc := range tests {
_, err := NewPeriodicTaskManager(tc.opts)
if err != nil {
t.Errorf("%s; NewPeriodicTaskManager returned error: %v", tc.desc, err)
}
}
}
func TestNewPeriodicTaskManagerError(t *testing.T) {
cfgs := []*PeriodicTaskConfig{
{Cronspec: "* * * * *", Task: NewTask("foo", nil)},
{Cronspec: "* * * * *", Task: NewTask("bar", nil)},
}
tests := []struct {
desc string
opts PeriodicTaskManagerOpts
}{
{
desc: "without provider",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
},
},
{
desc: "without redisConOpt",
opts: PeriodicTaskManagerOpts{
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
},
},
}
for _, tc := range tests {
_, err := NewPeriodicTaskManager(tc.opts)
if err == nil {
t.Errorf("%s; NewPeriodicTaskManager did not return error", tc.desc)
}
}
}
func TestPeriodicTaskConfigHash(t *testing.T) {
tests := []struct {
desc string
a *PeriodicTaskConfig
b *PeriodicTaskConfig
isSame bool
}{
{
desc: "basic identity test",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
isSame: true,
},
{
desc: "with a option",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
isSame: true,
},
{
desc: "with multiple options (different order)",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Unique(5 * time.Minute), Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue"), Unique(5 * time.Minute)},
},
isSame: true,
},
{
desc: "with payload",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("hello world!")),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("hello world!")),
Opts: []Option{Queue("myqueue")},
},
isSame: true,
},
{
desc: "with different cronspecs",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
b: &PeriodicTaskConfig{
Cronspec: "5 * * * *",
Task: NewTask("foo", nil),
},
isSame: false,
},
{
desc: "with different task type",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("bar", nil),
},
isSame: false,
},
{
desc: "with different options",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Unique(10 * time.Minute)},
},
isSame: false,
},
{
desc: "with different options (one is subset of the other)",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue"), Unique(10 * time.Minute)},
},
isSame: false,
},
{
desc: "with different payload",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("hello!")),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("HELLO!")),
Opts: []Option{Queue("myqueue"), Unique(10 * time.Minute)},
},
isSame: false,
},
}
for _, tc := range tests {
if tc.isSame && tc.a.hash() != tc.b.hash() {
t.Errorf("%s: a.hash=%s b.hash=%s expected to be equal",
tc.desc, tc.a.hash(), tc.b.hash())
}
if !tc.isSame && tc.a.hash() == tc.b.hash() {
t.Errorf("%s: a.hash=%s b.hash=%s expected to be not equal",
tc.desc, tc.a.hash(), tc.b.hash())
}
}
}
// Things to test.
// - Run the manager
// - Change provider to return new configs
// - Verify that the scheduler synced with the new config
func TestPeriodicTaskManager(t *testing.T) {
// Note: In this test, we'll use task type as an ID for each config.
cfgs := []*PeriodicTaskConfig{
{Task: NewTask("task1", nil), Cronspec: "* * * * 1"},
{Task: NewTask("task2", nil), Cronspec: "* * * * 2"},
}
const syncInterval = 3 * time.Second
provider := &FakeConfigProvider{cfgs: cfgs}
mgr, err := NewPeriodicTaskManager(PeriodicTaskManagerOpts{
RedisConnOpt: getRedisConnOpt(t),
PeriodicTaskConfigProvider: provider,
SyncInterval: syncInterval,
})
if err != nil {
t.Fatalf("Failed to initialize PeriodicTaskManager: %v", err)
}
if err := mgr.Start(); err != nil {
t.Fatalf("Failed to start PeriodicTaskManager: %v", err)
}
defer mgr.Shutdown()
got := extractCronEntries(mgr.s)
want := []*cronEntry{
{Cronspec: "* * * * 1", TaskType: "task1"},
{Cronspec: "* * * * 2", TaskType: "task2"},
}
if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
t.Errorf("Diff found in scheduler's registered entries: %s", diff)
}
// Change the underlying configs
// - task2 removed
// - task3 added
provider.SetConfigs([]*PeriodicTaskConfig{
{Task: NewTask("task1", nil), Cronspec: "* * * * 1"},
{Task: NewTask("task3", nil), Cronspec: "* * * * 3"},
})
// Wait for the next sync
time.Sleep(syncInterval * 2)
// Verify the entries are synced
got = extractCronEntries(mgr.s)
want = []*cronEntry{
{Cronspec: "* * * * 1", TaskType: "task1"},
{Cronspec: "* * * * 3", TaskType: "task3"},
}
if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
t.Errorf("Diff found in scheduler's registered entries: %s", diff)
}
// Change the underlying configs
// All configs removed, empty set.
provider.SetConfigs([]*PeriodicTaskConfig{})
// Wait for the next sync
time.Sleep(syncInterval * 2)
// Verify the entries are synced
got = extractCronEntries(mgr.s)
want = []*cronEntry{}
if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
t.Errorf("Diff found in scheduler's registered entries: %s", diff)
}
}
func extractCronEntries(s *Scheduler) []*cronEntry {
var out []*cronEntry
for _, e := range s.cron.Entries() {
job := e.Job.(*enqueueJob)
out = append(out, &cronEntry{Cronspec: job.cronspec, TaskType: job.task.Type()})
}
return out
}
var sortCronEntry = cmp.Transformer("sortCronEntry", func(in []*cronEntry) []*cronEntry {
out := append([]*cronEntry(nil), in...)
sort.Slice(out, func(i, j int) bool {
return out[i].TaskType < out[j].TaskType
})
return out
})
// A simple struct to allow for simpler comparison in tests.
type cronEntry struct {
Cronspec string
TaskType string
}
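
The sortCronEntry transformer above is what makes the entry comparison order-insensitive. The same pattern works for any slice type; a standalone sketch with illustrative names:

package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Sort copies of both slices inside a transformer so cmp.Diff ignores order.
	sortStrings := cmp.Transformer("sortStrings", func(in []string) []string {
		out := append([]string(nil), in...)
		sort.Strings(out)
		return out
	})
	a := []string{"task2", "task1"}
	b := []string{"task1", "task2"}
	fmt.Println(cmp.Diff(a, b, sortStrings) == "") // prints "true"
}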

View File

@@ -6,8 +6,8 @@ package asynq
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"math"
"math/rand" "math/rand"
"runtime" "runtime"
"runtime/debug" "runtime/debug"
@@ -17,16 +17,20 @@ import (
"time" "time"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
asynqcontext "github.com/hibiken/asynq/internal/context"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/timeutil"
"golang.org/x/time/rate" "golang.org/x/time/rate"
) )
type processor struct { type processor struct {
logger *log.Logger logger *log.Logger
broker base.Broker broker base.Broker
clock timeutil.Clock
handler Handler handler Handler
baseCtxFn func() context.Context
queueConfig map[string]int queueConfig map[string]int
@@ -34,6 +38,7 @@ type processor struct {
orderedQueues []string orderedQueues []string
retryDelayFunc RetryDelayFunc retryDelayFunc RetryDelayFunc
isFailureFunc func(error) bool
errHandler ErrorHandler errHandler ErrorHandler
@@ -70,7 +75,9 @@ type processor struct {
type processorParams struct { type processorParams struct {
logger *log.Logger logger *log.Logger
broker base.Broker broker base.Broker
baseCtxFn func() context.Context
retryDelayFunc RetryDelayFunc retryDelayFunc RetryDelayFunc
isFailureFunc func(error) bool
syncCh chan<- *syncRequest syncCh chan<- *syncRequest
cancelations *base.Cancelations cancelations *base.Cancelations
concurrency int concurrency int
@@ -92,9 +99,12 @@ func newProcessor(params processorParams) *processor {
return &processor{ return &processor{
logger: params.logger, logger: params.logger,
broker: params.broker, broker: params.broker,
baseCtxFn: params.baseCtxFn,
clock: timeutil.NewRealClock(),
queueConfig: queues, queueConfig: queues,
orderedQueues: orderedQueues, orderedQueues: orderedQueues,
retryDelayFunc: params.retryDelayFunc, retryDelayFunc: params.retryDelayFunc,
isFailureFunc: params.isFailureFunc,
syncRequestCh: params.syncCh, syncRequestCh: params.syncCh,
cancelations: params.cancelations, cancelations: params.cancelations,
errLogLimiter: rate.NewLimiter(rate.Every(3*time.Second), 1), errLogLimiter: rate.NewLimiter(rate.Every(3*time.Second), 1),
@@ -123,8 +133,8 @@ func (p *processor) stop() {
}) })
} }
// NOTE: once terminated, processor cannot be re-started. // NOTE: once shutdown, processor cannot be re-started.
func (p *processor) terminate() { func (p *processor) shutdown() {
p.stop() p.stop()
time.AfterFunc(p.shutdownTimeout, func() { close(p.abort) }) time.AfterFunc(p.shutdownTimeout, func() { close(p.abort) })
@@ -161,9 +171,9 @@ func (p *processor) exec() {
return return
case p.sema <- struct{}{}: // acquire token case p.sema <- struct{}{}: // acquire token
qnames := p.queues() qnames := p.queues()
msg, deadline, err := p.broker.Dequeue(qnames...) msg, leaseExpirationTime, err := p.broker.Dequeue(qnames...)
switch { switch {
case err == rdb.ErrNoProcessableTask: case errors.Is(err, errors.ErrNoProcessableTask):
p.logger.Debug("All queues are empty") p.logger.Debug("All queues are empty")
// Queues are empty, this is a normal behavior. // Queues are empty, this is a normal behavior.
// Sleep to avoid slamming redis and let scheduler move tasks into queues. // Sleep to avoid slamming redis and let scheduler move tasks into queues.
@@ -180,60 +190,77 @@ func (p *processor) exec() {
return return
} }
p.starting <- &workerInfo{msg, time.Now(), deadline} lease := base.NewLease(leaseExpirationTime)
deadline := p.computeDeadline(msg)
p.starting <- &workerInfo{msg, time.Now(), deadline, lease}
go func() { go func() {
defer func() { defer func() {
p.finished <- msg p.finished <- msg
<-p.sema // release token <-p.sema // release token
}() }()
ctx, cancel := createContext(msg, deadline) ctx, cancel := asynqcontext.New(p.baseCtxFn(), msg, deadline)
p.cancelations.Add(msg.ID.String(), cancel) p.cancelations.Add(msg.ID, cancel)
defer func() { defer func() {
cancel() cancel()
p.cancelations.Delete(msg.ID.String()) p.cancelations.Delete(msg.ID)
}() }()
// check context before starting a worker goroutine. // check context before starting a worker goroutine.
select { select {
case <-ctx.Done(): case <-ctx.Done():
// already canceled (e.g. deadline exceeded). // already canceled (e.g. deadline exceeded).
p.retryOrKill(ctx, msg, ctx.Err()) p.handleFailedMessage(ctx, lease, msg, ctx.Err())
return return
default: default:
} }
resCh := make(chan error, 1) resCh := make(chan error, 1)
go func() { go func() {
resCh <- p.perform(ctx, NewTask(msg.Type, msg.Payload)) task := newTask(
msg.Type,
msg.Payload,
&ResultWriter{
id: msg.ID,
qname: msg.Queue,
broker: p.broker,
ctx: ctx,
},
)
resCh <- p.perform(ctx, task)
}() }()
select { select {
case <-p.abort: case <-p.abort:
// time is up, push the message back to queue and quit this worker goroutine. // time is up, push the message back to queue and quit this worker goroutine.
p.logger.Warnf("Quitting worker. task id=%s", msg.ID) p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
p.requeue(msg) p.requeue(lease, msg)
return
case <-lease.Done():
cancel()
p.handleFailedMessage(ctx, lease, msg, ErrLeaseExpired)
return return
case <-ctx.Done(): case <-ctx.Done():
p.retryOrKill(ctx, msg, ctx.Err()) p.handleFailedMessage(ctx, lease, msg, ctx.Err())
return return
case resErr := <-resCh: case resErr := <-resCh:
// Note: One of three things should happen.
// 1) Done -> Removes the message from Active
// 2) Retry -> Removes the message from Active & Adds the message to Retry
// 3) Archive -> Removes the message from Active & Adds the message to archive
if resErr != nil { if resErr != nil {
p.retryOrKill(ctx, msg, resErr) p.handleFailedMessage(ctx, lease, msg, resErr)
return return
} }
p.markAsDone(ctx, msg) p.handleSucceededMessage(lease, msg)
} }
}() }()
} }
} }
func (p *processor) requeue(msg *base.TaskMessage) { func (p *processor) requeue(l *base.Lease, msg *base.TaskMessage) {
err := p.broker.Requeue(msg) if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.Requeue(ctx, msg)
if err != nil { if err != nil {
p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err) p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err)
} else { } else {
@@ -241,21 +268,51 @@ func (p *processor) requeue(msg *base.TaskMessage) {
} }
} }
func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) { func (p *processor) handleSucceededMessage(l *base.Lease, msg *base.TaskMessage) {
err := p.broker.Done(msg) if msg.Retention > 0 {
p.markAsComplete(l, msg)
} else {
p.markAsDone(l, msg)
}
}
func (p *processor) markAsComplete(l *base.Lease, msg *base.TaskMessage) {
if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.MarkAsComplete(ctx, msg)
if err != nil { if err != nil {
errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err) errMsg := fmt.Sprintf("Could not move task id=%s type=%q from %q to %q: %+v",
deadline, ok := ctx.Deadline() msg.ID, msg.Type, base.ActiveKey(msg.Queue), base.CompletedKey(msg.Queue), err)
if !ok {
panic("asynq: internal error: missing deadline in context")
}
p.logger.Warnf("%s; Will retry syncing", errMsg) p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{ p.syncRequestCh <- &syncRequest{
fn: func() error { fn: func() error {
return p.broker.Done(msg) return p.broker.MarkAsComplete(ctx, msg)
}, },
errMsg: errMsg, errMsg: errMsg,
deadline: deadline, deadline: l.Deadline(),
}
}
}
func (p *processor) markAsDone(l *base.Lease, msg *base.TaskMessage) {
if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.Done(ctx, msg)
if err != nil {
errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{
fn: func() error {
return p.broker.Done(ctx, msg)
},
errMsg: errMsg,
deadline: l.Deadline(),
} }
} }
} }
@@ -264,54 +321,61 @@ func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
// the task should not be retried and should be archived instead. // the task should not be retried and should be archived instead.
var SkipRetry = errors.New("skip retry for the task") var SkipRetry = errors.New("skip retry for the task")
func (p *processor) retryOrKill(ctx context.Context, msg *base.TaskMessage, err error) { func (p *processor) handleFailedMessage(ctx context.Context, l *base.Lease, msg *base.TaskMessage, err error) {
if p.errHandler != nil { if p.errHandler != nil {
p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err) p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
} }
if !p.isFailureFunc(err) {
// retry the task without marking it as failed
p.retry(l, msg, err, false /*isFailure*/)
return
}
if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) { if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) {
p.logger.Warnf("Retry exhausted for task id=%s", msg.ID) p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
p.archive(ctx, msg, err) p.archive(l, msg, err)
} else { } else {
p.retry(ctx, msg, err) p.retry(l, msg, err, true /*isFailure*/)
} }
} }
func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) { func (p *processor) retry(l *base.Lease, msg *base.TaskMessage, e error, isFailure bool) {
if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload)) d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
retryAt := time.Now().Add(d) retryAt := time.Now().Add(d)
err := p.broker.Retry(msg, retryAt, e.Error()) err := p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
if err != nil { if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue)) errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
}
p.logger.Warnf("%s; Will retry syncing", errMsg) p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{ p.syncRequestCh <- &syncRequest{
fn: func() error { fn: func() error {
return p.broker.Retry(msg, retryAt, e.Error()) return p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
}, },
errMsg: errMsg, errMsg: errMsg,
deadline: deadline, deadline: l.Deadline(),
} }
} }
} }
func (p *processor) archive(ctx context.Context, msg *base.TaskMessage, e error) { func (p *processor) archive(l *base.Lease, msg *base.TaskMessage, e error) {
err := p.broker.Archive(msg, e.Error()) if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.Archive(ctx, msg, e.Error())
if err != nil { if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue)) errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue))
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
}
p.logger.Warnf("%s; Will retry syncing", errMsg) p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{ p.syncRequestCh <- &syncRequest{
fn: func() error { fn: func() error {
return p.broker.Archive(msg, e.Error()) return p.broker.Archive(ctx, msg, e.Error())
}, },
errMsg: errMsg, errMsg: errMsg,
deadline: deadline, deadline: l.Deadline(),
} }
} }
} }
@@ -441,3 +505,19 @@ func gcd(xs ...int) int {
} }
return res return res
} }
// computeDeadline returns the effective deadline for the given task, computed from its Timeout and Deadline fields.
func (p *processor) computeDeadline(msg *base.TaskMessage) time.Time {
if msg.Timeout == 0 && msg.Deadline == 0 {
p.logger.Errorf("asynq: internal error: both timeout and deadline are not set for the task message: %s", msg.ID)
return p.clock.Now().Add(defaultTimeout)
}
if msg.Timeout != 0 && msg.Deadline != 0 {
deadlineUnix := math.Min(float64(p.clock.Now().Unix()+msg.Timeout), float64(msg.Deadline))
return time.Unix(int64(deadlineUnix), 0)
}
if msg.Timeout != 0 {
return p.clock.Now().Add(time.Duration(msg.Timeout) * time.Second)
}
return time.Unix(msg.Deadline, 0)
}
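
computeDeadline above picks the earlier of now+Timeout and the absolute Deadline when both are set, and falls back to a default when neither is. A small standalone sketch of that selection logic (the fallback value here is illustrative, not the package's actual defaultTimeout):

package main

import (
	"fmt"
	"time"
)

// effectiveDeadline mirrors the selection logic: timeout is in seconds,
// deadline is a unix timestamp in seconds, and fallback applies when neither is set.
func effectiveDeadline(now time.Time, timeout, deadline int64, fallback time.Duration) time.Time {
	switch {
	case timeout == 0 && deadline == 0:
		return now.Add(fallback)
	case timeout != 0 && deadline != 0:
		byTimeout := now.Add(time.Duration(timeout) * time.Second)
		byDeadline := time.Unix(deadline, 0)
		if byTimeout.Before(byDeadline) {
			return byTimeout
		}
		return byDeadline
	case timeout != 0:
		return now.Add(time.Duration(timeout) * time.Second)
	default:
		return time.Unix(deadline, 0)
	}
}

func main() {
	now := time.Unix(1600000000, 0)
	// A 60s timeout wins over a deadline five minutes away.
	d := effectiveDeadline(now, 60, now.Add(5*time.Minute).Unix(), 30*time.Minute)
	fmt.Println(d.Unix() - now.Unix()) // prints 60
}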

View File

@@ -6,6 +6,7 @@ package asynq
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"sort" "sort"
"sync" "sync"
@@ -16,9 +17,18 @@ import (
"github.com/google/go-cmp/cmp/cmpopts" "github.com/google/go-cmp/cmp/cmpopts"
h "github.com/hibiken/asynq/internal/asynqtest" h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
"github.com/hibiken/asynq/internal/timeutil"
) )
var taskCmpOpts = []cmp.Option{
sortTaskOpt, // sort the tasks
cmp.AllowUnexported(Task{}), // allow typename, payload fields to be compared
cmpopts.IgnoreFields(Task{}, "opts", "w"), // ignore opts, w fields
}
// fakeHeartbeater receives from starting and finished channels and does nothing. // fakeHeartbeater receives from starting and finished channels and does nothing.
func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) { func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) {
for { for {
@@ -42,8 +52,38 @@ func fakeSyncer(syncCh <-chan *syncRequest, done <-chan struct{}) {
} }
} }
// Returns a processor instance configured for testing purposes.
func newProcessorForTest(t *testing.T, r *rdb.RDB, h Handler) *processor {
starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
t.Cleanup(func() { close(done) })
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: r,
baseCtxFn: context.Background,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = h
return p
}
func TestProcessorSuccessWithSingleQueue(t *testing.T) { func TestProcessorSuccessWithSingleQueue(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r) rdbClient := rdb.NewRDB(r)
m1 := h.NewTaskMessage("task1", nil) m1 := h.NewTaskMessage("task1", nil)
@@ -86,45 +126,24 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
processed = append(processed, task) processed = append(processed, task)
return nil return nil
} }
starting := make(chan *workerInfo) p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = HandlerFunc(handler)
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
for _, msg := range tc.incoming { for _, msg := range tc.incoming {
err := rdbClient.Enqueue(msg) err := rdbClient.Enqueue(context.Background(), msg)
if err != nil { if err != nil {
p.terminate() p.shutdown()
t.Fatal(err) t.Fatal(err)
} }
} }
time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed. time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l) t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
} }
p.terminate() p.shutdown()
mu.Lock() mu.Lock()
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Payload{})); diff != "" { if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff) t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
} }
mu.Unlock() mu.Unlock()
@@ -146,6 +165,7 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
t3 = NewTask(m3.Type, m3.Payload) t3 = NewTask(m3.Type, m3.Payload)
t4 = NewTask(m4.Type, m4.Payload) t4 = NewTask(m4.Type, m4.Payload)
) )
defer r.Close()
tests := []struct { tests := []struct {
pending map[string][]*base.TaskMessage pending map[string][]*base.TaskMessage
@@ -177,46 +197,26 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
processed = append(processed, task) processed = append(processed, task)
return nil return nil
} }
starting := make(chan *workerInfo) p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
finished := make(chan *base.TaskMessage) p.queueConfig = map[string]int{
syncCh := make(chan *syncRequest) "default": 2,
done := make(chan struct{}) "high": 3,
defer func() { close(done) }() "low": 1,
go fakeHeartbeater(starting, finished, done) }
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: map[string]int{
"default": 2,
"high": 3,
"low": 1,
},
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = HandlerFunc(handler)
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
// Wait for two seconds to allow all pending tasks to be processed. // Wait for two seconds to allow all pending tasks to be processed.
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
// Make sure no messages are stuck in active list. // Make sure no messages are stuck in active list.
for _, qname := range tc.queues { for _, qname := range tc.queues {
if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l) t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
} }
} }
p.terminate() p.shutdown()
mu.Lock() mu.Lock()
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Payload{})); diff != "" { if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff) t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
} }
mu.Unlock() mu.Unlock()
@@ -226,9 +226,10 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
// https://github.com/hibiken/asynq/issues/166 // https://github.com/hibiken/asynq/issues/166
func TestProcessTasksWithLargeNumberInPayload(t *testing.T) { func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r) rdbClient := rdb.NewRDB(r)
m1 := h.NewTaskMessage("large_number", map[string]interface{}{"data": 111111111111111111}) m1 := h.NewTaskMessage("large_number", h.JSON(map[string]interface{}{"data": 111111111111111111}))
t1 := NewTask(m1.Type, m1.Payload) t1 := NewTask(m1.Type, m1.Payload)
tests := []struct { tests := []struct {
@@ -250,46 +251,29 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
handler := func(ctx context.Context, task *Task) error { handler := func(ctx context.Context, task *Task) error {
mu.Lock() mu.Lock()
defer mu.Unlock() defer mu.Unlock()
if data, err := task.Payload.GetInt("data"); err != nil { var payload map[string]int
t.Errorf("coult not get data from payload: %v", err) if err := json.Unmarshal(task.Payload(), &payload); err != nil {
} else { t.Errorf("coult not decode payload: %v", err)
}
if data, ok := payload["data"]; ok {
t.Logf("data == %d", data) t.Logf("data == %d", data)
} else {
t.Errorf("could not get data from payload")
} }
processed = append(processed, task) processed = append(processed, task)
return nil return nil
} }
starting := make(chan *workerInfo) p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: DefaultRetryDelayFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = HandlerFunc(handler)
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed. time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l) t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
} }
p.terminate() p.shutdown()
mu.Lock() mu.Lock()
if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmpopts.IgnoreUnexported(Payload{})); diff != "" { if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff) t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
} }
mu.Unlock() mu.Unlock()
@@ -298,6 +282,7 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
func TestProcessorRetry(t *testing.T) { func TestProcessorRetry(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r) rdbClient := rdb.NewRDB(r)
m1 := h.NewTaskMessage("send_email", nil) m1 := h.NewTaskMessage("send_email", nil)
@@ -308,66 +293,55 @@ func TestProcessorRetry(t *testing.T) {
errMsg := "something went wrong" errMsg := "something went wrong"
wrappedSkipRetry := fmt.Errorf("%s:%w", errMsg, SkipRetry) wrappedSkipRetry := fmt.Errorf("%s:%w", errMsg, SkipRetry)
now := time.Now()
tests := []struct { tests := []struct {
desc string // test description desc string // test description
pending []*base.TaskMessage // initial default queue state pending []*base.TaskMessage // initial default queue state
incoming []*base.TaskMessage // tasks to be enqueued during run
delay time.Duration // retry delay duration delay time.Duration // retry delay duration
handler Handler // task handler handler Handler // task handler
wait time.Duration // wait duration between starting and stopping processor for this test case wait time.Duration // wait duration between starting and stopping processor for this test case
wantRetry []base.Z // tasks in retry queue at the end wantErrMsg string // error message the task should record
wantRetry []*base.TaskMessage // tasks in retry queue at the end
wantArchived []*base.TaskMessage // tasks in archived queue at the end wantArchived []*base.TaskMessage // tasks in archived queue at the end
wantErrCount int // number of times error handler should be called wantErrCount int // number of times error handler should be called
}{ }{
{ {
desc: "Should automatically retry errored tasks", desc: "Should automatically retry errored tasks",
pending: []*base.TaskMessage{m1, m2}, pending: []*base.TaskMessage{m1, m2, m3, m4},
incoming: []*base.TaskMessage{m3, m4}, delay: time.Minute,
delay: time.Minute,
handler: HandlerFunc(func(ctx context.Context, task *Task) error { handler: HandlerFunc(func(ctx context.Context, task *Task) error {
return fmt.Errorf(errMsg) return fmt.Errorf(errMsg)
}), }),
wait: 2 * time.Second, wait: 2 * time.Second,
wantRetry: []base.Z{ wantErrMsg: errMsg,
{Message: h.TaskMessageAfterRetry(*m2, errMsg), Score: now.Add(time.Minute).Unix()}, wantRetry: []*base.TaskMessage{m2, m3, m4},
{Message: h.TaskMessageAfterRetry(*m3, errMsg), Score: now.Add(time.Minute).Unix()}, wantArchived: []*base.TaskMessage{m1},
{Message: h.TaskMessageAfterRetry(*m4, errMsg), Score: now.Add(time.Minute).Unix()},
},
wantArchived: []*base.TaskMessage{h.TaskMessageWithError(*m1, errMsg)},
wantErrCount: 4, wantErrCount: 4,
}, },
{ {
desc: "Should skip retry errored tasks", desc: "Should skip retry errored tasks",
pending: []*base.TaskMessage{m1, m2}, pending: []*base.TaskMessage{m1, m2},
incoming: []*base.TaskMessage{}, delay: time.Minute,
delay: time.Minute,
handler: HandlerFunc(func(ctx context.Context, task *Task) error { handler: HandlerFunc(func(ctx context.Context, task *Task) error {
return SkipRetry // return SkipRetry without wrapping return SkipRetry // return SkipRetry without wrapping
}), }),
wait: 2 * time.Second, wait: 2 * time.Second,
wantRetry: []base.Z{}, wantErrMsg: SkipRetry.Error(),
wantArchived: []*base.TaskMessage{ wantRetry: []*base.TaskMessage{},
h.TaskMessageWithError(*m1, SkipRetry.Error()), wantArchived: []*base.TaskMessage{m1, m2},
h.TaskMessageWithError(*m2, SkipRetry.Error()),
},
wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
}, },
{ {
desc: "Should skip retry errored tasks (with error wrapping)", desc: "Should skip retry errored tasks (with error wrapping)",
pending: []*base.TaskMessage{m1, m2}, pending: []*base.TaskMessage{m1, m2},
incoming: []*base.TaskMessage{}, delay: time.Minute,
delay: time.Minute,
handler: HandlerFunc(func(ctx context.Context, task *Task) error { handler: HandlerFunc(func(ctx context.Context, task *Task) error {
return wrappedSkipRetry return wrappedSkipRetry
}), }),
wait: 2 * time.Second, wait: 2 * time.Second,
wantRetry: []base.Z{}, wantErrMsg: wrappedSkipRetry.Error(),
wantArchived: []*base.TaskMessage{ wantRetry: []*base.TaskMessage{},
h.TaskMessageWithError(*m1, wrappedSkipRetry.Error()), wantArchived: []*base.TaskMessage{m1, m2},
h.TaskMessageWithError(*m2, wrappedSkipRetry.Error()),
},
wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error wantErrCount: 2, // ErrorHandler should still be called with SkipRetry error
}, },
} }
@@ -389,50 +363,43 @@ func TestProcessorRetry(t *testing.T) {
defer mu.Unlock() defer mu.Unlock()
n++ n++
} }
starting := make(chan *workerInfo) p := newProcessorForTest(t, rdbClient, tc.handler)
finished := make(chan *base.TaskMessage) p.errHandler = ErrorHandlerFunc(errHandler)
done := make(chan struct{}) p.retryDelayFunc = delayFunc
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
retryDelayFunc: delayFunc,
syncCh: nil,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: ErrorHandlerFunc(errHandler),
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = tc.handler
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
for _, msg := range tc.incoming { runTime := time.Now() // time when processor is running
err := rdbClient.Enqueue(msg) time.Sleep(tc.wait) // FIXME: This makes test flaky.
if err != nil { p.shutdown()
p.terminate()
t.Fatal(err)
}
}
time.Sleep(tc.wait) // FIXME: This makes test flaky.
p.terminate()
cmpOpt := h.EquateInt64Approx(1) // allow up to a second difference in zset score cmpOpt := h.EquateInt64Approx(int64(tc.wait.Seconds())) // allow up to a wait-second difference in zset score
gotRetry := h.GetRetryEntries(t, r, base.DefaultQueueName) gotRetry := h.GetRetryEntries(t, r, base.DefaultQueueName)
if diff := cmp.Diff(tc.wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" { var wantRetry []base.Z // Note: construct wantRetry here since `LastFailedAt` and ZSCORE are relative to each test run.
for _, msg := range tc.wantRetry {
wantRetry = append(wantRetry,
base.Z{
Message: h.TaskMessageAfterRetry(*msg, tc.wantErrMsg, runTime),
Score: runTime.Add(tc.delay).Unix(),
})
}
if diff := cmp.Diff(wantRetry, gotRetry, h.SortZSetEntryOpt, cmpOpt); diff != "" {
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.RetryKey(base.DefaultQueueName), diff) t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.RetryKey(base.DefaultQueueName), diff)
} }
gotDead := h.GetArchivedMessages(t, r, base.DefaultQueueName) gotArchived := h.GetArchivedEntries(t, r, base.DefaultQueueName)
if diff := cmp.Diff(tc.wantArchived, gotDead, h.SortMsgOpt); diff != "" { var wantArchived []base.Z // Note: construct wantArchived here since `LastFailedAt` and ZSCORE are relative to each test run.
for _, msg := range tc.wantArchived {
wantArchived = append(wantArchived,
base.Z{
Message: h.TaskMessageWithError(*msg, tc.wantErrMsg, runTime),
Score: runTime.Unix(),
})
}
if diff := cmp.Diff(wantArchived, gotArchived, h.SortZSetEntryOpt, cmpOpt); diff != "" {
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff) t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff)
} }
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%s: %q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), tc.desc, l) t.Errorf("%s: %q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), tc.desc, l)
} }
@@ -442,6 +409,179 @@ func TestProcessorRetry(t *testing.T) {
} }
} }
func TestProcessorMarkAsComplete(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
msg1 := h.NewTaskMessage("one", nil)
msg2 := h.NewTaskMessage("two", nil)
msg3 := h.NewTaskMessageWithQueue("three", nil, "custom")
msg1.Retention = 3600
msg3.Retention = 7200
handler := func(ctx context.Context, task *Task) error { return nil }
tests := []struct {
pending map[string][]*base.TaskMessage
completed map[string][]base.Z
queueCfg map[string]int
wantPending map[string][]*base.TaskMessage
wantCompleted func(completedAt time.Time) map[string][]base.Z
}{
{
pending: map[string][]*base.TaskMessage{
"default": {msg1, msg2},
"custom": {msg3},
},
completed: map[string][]base.Z{
"default": {},
"custom": {},
},
queueCfg: map[string]int{
"default": 1,
"custom": 1,
},
wantPending: map[string][]*base.TaskMessage{
"default": {},
"custom": {},
},
wantCompleted: func(completedAt time.Time) map[string][]base.Z {
return map[string][]base.Z{
"default": {{Message: h.TaskMessageWithCompletedAt(*msg1, completedAt), Score: completedAt.Unix() + msg1.Retention}},
"custom": {{Message: h.TaskMessageWithCompletedAt(*msg3, completedAt), Score: completedAt.Unix() + msg3.Retention}},
}
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllPendingQueues(t, r, tc.pending)
h.SeedAllCompletedQueues(t, r, tc.completed)
p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
p.queueConfig = tc.queueCfg
p.start(&sync.WaitGroup{})
runTime := time.Now() // time when processor is running
time.Sleep(2 * time.Second)
p.shutdown()
for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("diff found in %q pending set; want=%v, got=%v\n%s", qname, want, gotPending, diff)
}
}
for qname, want := range tc.wantCompleted(runTime) {
gotCompleted := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, gotCompleted, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("diff found in %q completed set; want=%v, got=%v\n%s", qname, want, gotCompleted, diff)
}
}
}
}
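The completed-set entries this test seeds come from enqueueing tasks with a retention period, so a successful task is kept (rather than deleted) until the period elapses. Below is a minimal sketch of that caller-side usage; it assumes the asynq.Retention task option that pairs with the Retention field above, and the task type, duration, and Redis address are illustrative only.

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"}) // assumed local Redis
	defer client.Close()

	// With a retention period the task moves to the completed set on success
	// and is removed by the janitor only after the period has elapsed.
	info, err := client.Enqueue(
		asynq.NewTask("report:generate", nil), // hypothetical task type
		asynq.Retention(1*time.Hour),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("enqueued task id=%s queue=%s", info.ID, info.Queue)
}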
// Test a scenario where the worker server cannot communicate with redis due to a network failure
// and the lease expires
func TestProcessorWithExpiredLease(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
m1 := h.NewTaskMessage("task1", nil)
tests := []struct {
pending []*base.TaskMessage
handler Handler
wantErrCount int
}{
{
pending: []*base.TaskMessage{m1},
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
// make sure the task processing time exceeds lease duration
// to test expired lease.
time.Sleep(rdb.LeaseDuration + 10*time.Second)
return nil
}),
wantErrCount: 1, // ErrorHandler should still be called with ErrLeaseExpired
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName)
starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
t.Cleanup(func() { close(done) })
// fake heartbeater which notifies lease expiration
go func() {
for {
select {
case w := <-starting:
// simulate expiration by resetting to some time in the past
w.lease.Reset(time.Now().Add(-5 * time.Second))
if !w.lease.NotifyExpiration() {
panic("Failed to notifiy lease expiration")
}
case <-finished:
// do nothing
case <-done:
return
}
}
}()
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
baseCtxFn: context.Background,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = tc.handler
var (
mu sync.Mutex // guards n and errs
n int // number of times error handler is called
errs []error // error passed to error handler
)
p.errHandler = ErrorHandlerFunc(func(ctx context.Context, t *Task, err error) {
mu.Lock()
defer mu.Unlock()
n++
errs = append(errs, err)
})
p.start(&sync.WaitGroup{})
time.Sleep(4 * time.Second)
p.shutdown()
if n != tc.wantErrCount {
t.Errorf("Unexpected number of error count: got %d, want %d", n, tc.wantErrCount)
continue
}
for i := 0; i < tc.wantErrCount; i++ {
if !errors.Is(errs[i], ErrLeaseExpired) {
t.Errorf("Unexpected error was passed to ErrorHandler: got %v want %v", errs[i], ErrLeaseExpired)
}
}
}
}
func TestProcessorQueues(t *testing.T) { func TestProcessorQueues(t *testing.T) {
sortOpt := cmp.Transformer("SortStrings", func(in []string) []string { sortOpt := cmp.Transformer("SortStrings", func(in []string) []string {
out := append([]string(nil), in...) // Copy input to avoid mutating it out := append([]string(nil), in...) // Copy input to avoid mutating it
@@ -470,25 +610,10 @@ func TestProcessorQueues(t *testing.T) {
} }
for _, tc := range tests { for _, tc := range tests {
starting := make(chan *workerInfo) // Note: rdb and handler not needed for this test.
finished := make(chan *base.TaskMessage) p := newProcessorForTest(t, nil, nil)
done := make(chan struct{}) p.queueConfig = tc.queueCfg
defer func() { close(done) }()
go fakeHeartbeater(starting, finished, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: nil,
retryDelayFunc: DefaultRetryDelayFunc,
syncCh: nil,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: tc.queueCfg,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
got := p.queues() got := p.queues()
if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" { if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s", t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s",
@@ -569,7 +694,9 @@ func TestProcessorWithStrictPriority(t *testing.T) {
p := newProcessor(processorParams{ p := newProcessor(processorParams{
logger: testLogger, logger: testLogger,
broker: rdbClient, broker: rdbClient,
baseCtxFn: context.Background,
retryDelayFunc: DefaultRetryDelayFunc, retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh, syncCh: syncCh,
cancelations: base.NewCancelations(), cancelations: base.NewCancelations(),
concurrency: 1, // Set concurrency to 1 to make sure tasks are processed one at a time. concurrency: 1, // Set concurrency to 1 to make sure tasks are processed one at a time.
@@ -586,13 +713,13 @@ func TestProcessorWithStrictPriority(t *testing.T) {
time.Sleep(tc.wait) time.Sleep(tc.wait)
// Make sure no tasks are stuck in active list. // Make sure no tasks are stuck in active list.
for _, qname := range tc.queues { for _, qname := range tc.queues {
if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l) t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
} }
} }
p.terminate() p.shutdown()
if diff := cmp.Diff(tc.wantProcessed, processed, cmp.AllowUnexported(Payload{})); diff != "" { if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff) t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
} }
@@ -611,7 +738,7 @@ func TestProcessorPerform(t *testing.T) {
handler: func(ctx context.Context, t *Task) error { handler: func(ctx context.Context, t *Task) error {
return nil return nil
}, },
task: NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}), task: NewTask("gen_thumbnail", h.JSON(map[string]interface{}{"src": "some/img/path"})),
wantErr: false, wantErr: false,
}, },
{ {
@@ -619,7 +746,7 @@ func TestProcessorPerform(t *testing.T) {
handler: func(ctx context.Context, t *Task) error { handler: func(ctx context.Context, t *Task) error {
return fmt.Errorf("something went wrong") return fmt.Errorf("something went wrong")
}, },
task: NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}), task: NewTask("gen_thumbnail", h.JSON(map[string]interface{}{"src": "some/img/path"})),
wantErr: true, wantErr: true,
}, },
{ {
@@ -627,16 +754,13 @@ func TestProcessorPerform(t *testing.T) {
handler: func(ctx context.Context, t *Task) error { handler: func(ctx context.Context, t *Task) error {
panic("something went terribly wrong") panic("something went terribly wrong")
}, },
task: NewTask("gen_thumbnail", map[string]interface{}{"src": "some/img/path"}), task: NewTask("gen_thumbnail", h.JSON(map[string]interface{}{"src": "some/img/path"})),
wantErr: true, wantErr: true,
}, },
} }
// Note: We don't need to fully initialize the processor since we are only testing // Note: We don't need to fully initialize the processor since we are only testing
// perform method. // perform method.
p := newProcessor(processorParams{ p := newProcessorForTest(t, nil, nil)
logger: testLogger,
queues: defaultQueueConfig,
})
for _, tc := range tests { for _, tc := range tests {
p.handler = tc.handler p.handler = tc.handler
@@ -731,3 +855,69 @@ func TestNormalizeQueues(t *testing.T) {
} }
} }
} }
func TestProcessorComputeDeadline(t *testing.T) {
now := time.Now()
p := processor{
logger: log.NewLogger(nil),
clock: timeutil.NewSimulatedClock(now),
}
tests := []struct {
desc string
msg *base.TaskMessage
want time.Time
}{
{
desc: "message with only timeout specified",
msg: &base.TaskMessage{
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(30 * time.Minute),
},
{
desc: "message with only deadline specified",
msg: &base.TaskMessage{
Deadline: now.Add(24 * time.Hour).Unix(),
},
want: now.Add(24 * time.Hour),
},
{
desc: "message with both timeout and deadline set (now+timeout < deadline)",
msg: &base.TaskMessage{
Deadline: now.Add(24 * time.Hour).Unix(),
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(30 * time.Minute),
},
{
desc: "message with both timeout and deadline set (now+timeout > deadline)",
msg: &base.TaskMessage{
Deadline: now.Add(10 * time.Minute).Unix(),
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(10 * time.Minute),
},
{
desc: "message with both timeout and deadline set (now+timeout == deadline)",
msg: &base.TaskMessage{
Deadline: now.Add(30 * time.Minute).Unix(),
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(30 * time.Minute),
},
{
desc: "message without timeout and deadline",
msg: &base.TaskMessage{},
want: now.Add(defaultTimeout),
},
}
for _, tc := range tests {
got := p.computeDeadline(tc.msg)
// Compare the Unix epoch with seconds granularity
if got.Unix() != tc.want.Unix() {
t.Errorf("%s: got=%v, want=%v", tc.desc, got.Unix(), tc.want.Unix())
}
}
}
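The test above pins down the deadline rule: prefer whichever of now+Timeout and Deadline comes first, and fall back to a default timeout when neither is set. The following standalone sketch restates that rule for illustration; the helper and the defaultTimeout value are hypothetical stand-ins for processor.computeDeadline, not the actual implementation.

package main

import (
	"fmt"
	"time"
)

const defaultTimeout = 30 * time.Minute // assumed fallback, for illustration only

// computeDeadline mirrors the behavior exercised by TestProcessorComputeDeadline:
// min(now+Timeout, Deadline), or now+defaultTimeout when neither field is set.
func computeDeadline(now time.Time, timeoutSecs, deadlineUnix int64) time.Time {
	switch {
	case timeoutSecs > 0 && deadlineUnix > 0:
		byTimeout := now.Add(time.Duration(timeoutSecs) * time.Second)
		byDeadline := time.Unix(deadlineUnix, 0)
		if byTimeout.Before(byDeadline) {
			return byTimeout
		}
		return byDeadline
	case timeoutSecs > 0:
		return now.Add(time.Duration(timeoutSecs) * time.Second)
	case deadlineUnix > 0:
		return time.Unix(deadlineUnix, 0)
	default:
		return now.Add(defaultTimeout)
	}
}

func main() {
	now := time.Now()
	// now+30m is earlier than the 10h deadline, so the timeout wins.
	fmt.Println(computeDeadline(now, int64((30*time.Minute).Seconds()), now.Add(10*time.Hour).Unix()))
}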

View File

@@ -5,11 +5,12 @@
package asynq package asynq
import ( import (
"fmt" "context"
"sync" "sync"
"time" "time"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
) )
@@ -17,6 +18,7 @@ type recoverer struct {
logger *log.Logger logger *log.Logger
broker base.Broker broker base.Broker
retryDelayFunc RetryDelayFunc retryDelayFunc RetryDelayFunc
isFailureFunc func(error) bool
// channel to communicate back to the long running "recoverer" goroutine. // channel to communicate back to the long running "recoverer" goroutine.
done chan struct{} done chan struct{}
@@ -34,6 +36,7 @@ type recovererParams struct {
queues []string queues []string
interval time.Duration interval time.Duration
retryDelayFunc RetryDelayFunc retryDelayFunc RetryDelayFunc
isFailureFunc func(error) bool
} }
func newRecoverer(params recovererParams) *recoverer { func newRecoverer(params recovererParams) *recoverer {
@@ -44,10 +47,11 @@ func newRecoverer(params recovererParams) *recoverer {
queues: params.queues, queues: params.queues,
interval: params.interval, interval: params.interval,
retryDelayFunc: params.retryDelayFunc, retryDelayFunc: params.retryDelayFunc,
isFailureFunc: params.isFailureFunc,
} }
} }
func (r *recoverer) terminate() { func (r *recoverer) shutdown() {
r.logger.Debug("Recoverer shutting down...") r.logger.Debug("Recoverer shutting down...")
// Signal the recoverer goroutine to stop polling. // Signal the recoverer goroutine to stop polling.
r.done <- struct{}{} r.done <- struct{}{}
@@ -57,6 +61,7 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done() defer wg.Done()
r.recover()
timer := time.NewTimer(r.interval) timer := time.NewTimer(r.interval)
for { for {
select { select {
@@ -65,37 +70,44 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
timer.Stop() timer.Stop()
return return
case <-timer.C: case <-timer.C:
// Get all tasks which have expired 30 seconds ago or earlier. r.recover()
deadline := time.Now().Add(-30 * time.Second) timer.Reset(r.interval)
msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...)
if err != nil {
r.logger.Warn("recoverer: could not list deadline exceeded tasks")
continue
}
const errMsg = "deadline exceeded" // TODO: better error message
for _, msg := range msgs {
if msg.Retried >= msg.Retry {
r.archive(msg, errMsg)
} else {
r.retry(msg, errMsg)
}
}
} }
} }
}() }()
} }
func (r *recoverer) retry(msg *base.TaskMessage, errMsg string) { // ErrLeaseExpired error indicates that the task failed because the worker working on the task
delay := r.retryDelayFunc(msg.Retried, fmt.Errorf(errMsg), NewTask(msg.Type, msg.Payload)) // could not extend its lease due to missing heartbeats. The worker may have crashed or been cut off from the network.
retryAt := time.Now().Add(delay) var ErrLeaseExpired = errors.New("asynq: task lease expired")
if err := r.broker.Retry(msg, retryAt, errMsg); err != nil {
r.logger.Warnf("recoverer: could not retry deadline exceeded task: %v", err) func (r *recoverer) recover() {
// Get all tasks which have expired 30 seconds ago or earlier to accommodate a certain amount of clock skew.
cutoff := time.Now().Add(-30 * time.Second)
msgs, err := r.broker.ListLeaseExpired(cutoff, r.queues...)
if err != nil {
r.logger.Warn("recoverer: could not list lease expired tasks")
return
}
for _, msg := range msgs {
if msg.Retried >= msg.Retry {
r.archive(msg, ErrLeaseExpired)
} else {
r.retry(msg, ErrLeaseExpired)
}
} }
} }
func (r *recoverer) archive(msg *base.TaskMessage, errMsg string) { func (r *recoverer) retry(msg *base.TaskMessage, err error) {
if err := r.broker.Archive(msg, errMsg); err != nil { delay := r.retryDelayFunc(msg.Retried, err, NewTask(msg.Type, msg.Payload))
retryAt := time.Now().Add(delay)
if err := r.broker.Retry(context.Background(), msg, retryAt, err.Error(), r.isFailureFunc(err)); err != nil {
r.logger.Warnf("recoverer: could not retry lease expired task: %v", err)
}
}
func (r *recoverer) archive(msg *base.TaskMessage, err error) {
if err := r.broker.Archive(context.Background(), msg, err.Error()); err != nil {
r.logger.Warnf("recoverer: could not move task to archive: %v", err) r.logger.Warnf("recoverer: could not move task to archive: %v", err)
} }
} }
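Because the recoverer now records ErrLeaseExpired when it retries or archives an expired task, an ErrorHandler can distinguish lease-expiry failures from handler failures. A minimal sketch follows, assuming the exported asynq.ErrLeaseExpired introduced above; the Redis address and logging are illustrative only.

package main

import (
	"context"
	"errors"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "127.0.0.1:6379"}, // assumed local Redis
		asynq.Config{
			Concurrency: 10,
			ErrorHandler: asynq.ErrorHandlerFunc(func(ctx context.Context, task *asynq.Task, err error) {
				if errors.Is(err, asynq.ErrLeaseExpired) {
					// The worker could not extend its lease (crash or network cutoff);
					// the recoverer retries or archives the task on its next pass.
					log.Printf("lease expired while processing %q", task.Type())
					return
				}
				log.Printf("task %q failed: %v", task.Type(), err)
			}),
		},
	)
	_ = srv // call srv.Run(mux) with a real handler in an actual program
}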

View File

@@ -27,29 +27,25 @@ func TestRecoverer(t *testing.T) {
t4.Retried = t4.Retry // t4 has reached its max retry count t4.Retried = t4.Retry // t4 has reached its max retry count
now := time.Now() now := time.Now()
oneHourFromNow := now.Add(1 * time.Hour)
fiveMinutesFromNow := now.Add(5 * time.Minute)
fiveMinutesAgo := now.Add(-5 * time.Minute)
oneHourAgo := now.Add(-1 * time.Hour)
tests := []struct { tests := []struct {
desc string desc string
inProgress map[string][]*base.TaskMessage active map[string][]*base.TaskMessage
deadlines map[string][]base.Z lease map[string][]base.Z
retry map[string][]base.Z retry map[string][]base.Z
archived map[string][]base.Z archived map[string][]base.Z
wantActive map[string][]*base.TaskMessage wantActive map[string][]*base.TaskMessage
wantDeadlines map[string][]base.Z wantLease map[string][]base.Z
wantRetry map[string][]*base.TaskMessage wantRetry map[string][]*base.TaskMessage
wantArchived map[string][]*base.TaskMessage wantArchived map[string][]*base.TaskMessage
}{ }{
{ {
desc: "with one active task", desc: "with one active task",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {t1}, "default": {t1},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": {{Message: t1, Score: fiveMinutesAgo.Unix()}}, "default": {{Message: t1, Score: now.Add(-1 * time.Minute).Unix()}},
}, },
retry: map[string][]base.Z{ retry: map[string][]base.Z{
"default": {}, "default": {},
@@ -60,11 +56,11 @@ func TestRecoverer(t *testing.T) {
wantActive: map[string][]*base.TaskMessage{ wantActive: map[string][]*base.TaskMessage{
"default": {}, "default": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {}, "default": {},
}, },
wantRetry: map[string][]*base.TaskMessage{ wantRetry: map[string][]*base.TaskMessage{
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")}, "default": {t1},
}, },
wantArchived: map[string][]*base.TaskMessage{ wantArchived: map[string][]*base.TaskMessage{
"default": {}, "default": {},
@@ -72,12 +68,12 @@ func TestRecoverer(t *testing.T) {
}, },
{ {
desc: "with a task with max-retry reached", desc: "with a task with max-retry reached",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {t4}, "default": {t4},
"critical": {}, "critical": {},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": {{Message: t4, Score: fiveMinutesAgo.Unix()}}, "default": {{Message: t4, Score: now.Add(-40 * time.Second).Unix()}},
"critical": {}, "critical": {},
}, },
retry: map[string][]base.Z{ retry: map[string][]base.Z{
@@ -92,7 +88,7 @@ func TestRecoverer(t *testing.T) {
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
@@ -101,23 +97,23 @@ func TestRecoverer(t *testing.T) {
"critical": {}, "critical": {},
}, },
wantArchived: map[string][]*base.TaskMessage{ wantArchived: map[string][]*base.TaskMessage{
"default": {h.TaskMessageWithError(*t4, "deadline exceeded")}, "default": {t4},
"critical": {}, "critical": {},
}, },
}, },
{ {
desc: "with multiple active tasks, and one expired", desc: "with multiple active tasks, and one expired",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {t1, t2}, "default": {t1, t2},
"critical": {t3}, "critical": {t3},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": { "default": {
{Message: t1, Score: oneHourAgo.Unix()}, {Message: t1, Score: now.Add(-2 * time.Minute).Unix()},
{Message: t2, Score: fiveMinutesFromNow.Unix()}, {Message: t2, Score: now.Add(20 * time.Second).Unix()},
}, },
"critical": { "critical": {
{Message: t3, Score: oneHourFromNow.Unix()}, {Message: t3, Score: now.Add(20 * time.Second).Unix()},
}, },
}, },
retry: map[string][]base.Z{ retry: map[string][]base.Z{
@@ -132,12 +128,12 @@ func TestRecoverer(t *testing.T) {
"default": {t2}, "default": {t2},
"critical": {t3}, "critical": {t3},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {{Message: t2, Score: fiveMinutesFromNow.Unix()}}, "default": {{Message: t2, Score: now.Add(20 * time.Second).Unix()}},
"critical": {{Message: t3, Score: oneHourFromNow.Unix()}}, "critical": {{Message: t3, Score: now.Add(20 * time.Second).Unix()}},
}, },
wantRetry: map[string][]*base.TaskMessage{ wantRetry: map[string][]*base.TaskMessage{
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")}, "default": {t1},
"critical": {}, "critical": {},
}, },
wantArchived: map[string][]*base.TaskMessage{ wantArchived: map[string][]*base.TaskMessage{
@@ -147,17 +143,17 @@ func TestRecoverer(t *testing.T) {
}, },
{ {
desc: "with multiple expired active tasks", desc: "with multiple expired active tasks",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {t1, t2}, "default": {t1, t2},
"critical": {t3}, "critical": {t3},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": { "default": {
{Message: t1, Score: oneHourAgo.Unix()}, {Message: t1, Score: now.Add(-1 * time.Minute).Unix()},
{Message: t2, Score: oneHourFromNow.Unix()}, {Message: t2, Score: now.Add(10 * time.Second).Unix()},
}, },
"critical": { "critical": {
{Message: t3, Score: fiveMinutesAgo.Unix()}, {Message: t3, Score: now.Add(-1 * time.Minute).Unix()},
}, },
}, },
retry: map[string][]base.Z{ retry: map[string][]base.Z{
@@ -172,12 +168,12 @@ func TestRecoverer(t *testing.T) {
"default": {t2}, "default": {t2},
"critical": {}, "critical": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {{Message: t2, Score: oneHourFromNow.Unix()}}, "default": {{Message: t2, Score: now.Add(10 * time.Second).Unix()}},
}, },
wantRetry: map[string][]*base.TaskMessage{ wantRetry: map[string][]*base.TaskMessage{
"default": {h.TaskMessageAfterRetry(*t1, "deadline exceeded")}, "default": {t1},
"critical": {h.TaskMessageAfterRetry(*t3, "deadline exceeded")}, "critical": {t3},
}, },
wantArchived: map[string][]*base.TaskMessage{ wantArchived: map[string][]*base.TaskMessage{
"default": {}, "default": {},
@@ -186,11 +182,11 @@ func TestRecoverer(t *testing.T) {
}, },
{ {
desc: "with empty active queue", desc: "with empty active queue",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
@@ -206,7 +202,7 @@ func TestRecoverer(t *testing.T) {
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
@@ -223,8 +219,8 @@ func TestRecoverer(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
h.FlushDB(t, r) h.FlushDB(t, r)
h.SeedAllActiveQueues(t, r, tc.inProgress) h.SeedAllActiveQueues(t, r, tc.active)
h.SeedAllDeadlines(t, r, tc.deadlines) h.SeedAllLease(t, r, tc.lease)
h.SeedAllRetryQueues(t, r, tc.retry) h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived) h.SeedAllArchivedQueues(t, r, tc.archived)
@@ -234,12 +230,14 @@ func TestRecoverer(t *testing.T) {
queues: []string{"default", "critical"}, queues: []string{"default", "critical"},
interval: 1 * time.Second, interval: 1 * time.Second,
retryDelayFunc: func(n int, err error, task *Task) time.Duration { return 30 * time.Second }, retryDelayFunc: func(n int, err error, task *Task) time.Duration { return 30 * time.Second },
isFailureFunc: defaultIsFailureFunc,
}) })
var wg sync.WaitGroup var wg sync.WaitGroup
recoverer.start(&wg) recoverer.start(&wg)
runTime := time.Now() // time when recoverer is running
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
recoverer.terminate() recoverer.shutdown()
for qname, want := range tc.wantActive { for qname, want := range tc.wantActive {
gotActive := h.GetActiveMessages(t, r, qname) gotActive := h.GetActiveMessages(t, r, qname)
@@ -247,21 +245,30 @@ func TestRecoverer(t *testing.T) {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff) t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff)
} }
} }
for qname, want := range tc.wantDeadlines { for qname, want := range tc.wantLease {
gotDeadlines := h.GetDeadlinesEntries(t, r, qname) gotLease := h.GetLeaseEntries(t, r, qname)
if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" { if diff := cmp.Diff(want, gotLease, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.DeadlinesKey(qname), diff) t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.LeaseKey(qname), diff)
} }
} }
for qname, want := range tc.wantRetry { cmpOpt := h.EquateInt64Approx(2) // allow up to two-second difference in `LastFailedAt`
for qname, msgs := range tc.wantRetry {
gotRetry := h.GetRetryMessages(t, r, qname) gotRetry := h.GetRetryMessages(t, r, qname)
if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" { var wantRetry []*base.TaskMessage // Note: construct message here since `LastFailedAt` is relative to each test run
for _, msg := range msgs {
wantRetry = append(wantRetry, h.TaskMessageAfterRetry(*msg, ErrLeaseExpired.Error(), runTime))
}
if diff := cmp.Diff(wantRetry, gotRetry, h.SortMsgOpt, cmpOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff) t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff)
} }
} }
for qname, want := range tc.wantArchived { for qname, msgs := range tc.wantArchived {
gotDead := h.GetArchivedMessages(t, r, qname) gotArchived := h.GetArchivedMessages(t, r, qname)
if diff := cmp.Diff(want, gotDead, h.SortMsgOpt); diff != "" { var wantArchived []*base.TaskMessage
for _, msg := range msgs {
wantArchived = append(wantArchived, h.TaskMessageWithError(*msg, ErrLeaseExpired.Error(), runTime))
}
if diff := cmp.Diff(wantArchived, gotArchived, h.SortMsgOpt, cmpOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff) t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff)
} }
} }

View File

@@ -10,7 +10,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
@@ -19,9 +19,13 @@ import (
) )
// A Scheduler kicks off tasks at regular intervals based on the user defined schedule. // A Scheduler kicks off tasks at regular intervals based on the user defined schedule.
//
// Schedulers are safe for concurrent use by multiple goroutines.
type Scheduler struct { type Scheduler struct {
id string id string
status *base.ServerStatus
state *serverState
logger *log.Logger logger *log.Logger
client *Client client *Client
rdb *rdb.RDB rdb *rdb.RDB
@@ -30,6 +34,9 @@ type Scheduler struct {
done chan struct{} done chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
errHandler func(task *Task, opts []Option, err error) errHandler func(task *Task, opts []Option, err error)
// guards idmap
mu sync.Mutex
// idmap maps Scheduler's entry ID to cron.EntryID // idmap maps Scheduler's entry ID to cron.EntryID
// to avoid using cron.EntryID as the public API of // to avoid using cron.EntryID as the public API of
// the Scheduler. // the Scheduler.
@@ -61,7 +68,7 @@ func NewScheduler(r RedisConnOpt, opts *SchedulerOpts) *Scheduler {
return &Scheduler{ return &Scheduler{
id: generateSchedulerID(), id: generateSchedulerID(),
status: base.NewServerStatus(base.StatusIdle), state: &serverState{value: srvStateNew},
logger: logger, logger: logger,
client: NewClient(r), client: NewClient(r),
rdb: rdb.NewRDB(c), rdb: rdb.NewRDB(c),
@@ -117,7 +124,7 @@ type enqueueJob struct {
} }
func (j *enqueueJob) Run() { func (j *enqueueJob) Run() {
res, err := j.client.Enqueue(j.task, j.opts...) info, err := j.client.Enqueue(j.task, j.opts...)
if err != nil { if err != nil {
j.logger.Errorf("scheduler could not enqueue a task %+v: %v", j.task, err) j.logger.Errorf("scheduler could not enqueue a task %+v: %v", j.task, err)
if j.errHandler != nil { if j.errHandler != nil {
@@ -125,10 +132,10 @@ func (j *enqueueJob) Run() {
} }
return return
} }
j.logger.Debugf("scheduler enqueued a task: %+v", res) j.logger.Debugf("scheduler enqueued a task: %+v", info)
event := &base.SchedulerEnqueueEvent{ event := &base.SchedulerEnqueueEvent{
TaskID: res.ID, TaskID: info.ID,
EnqueuedAt: res.EnqueuedAt.In(j.location), EnqueuedAt: time.Now().In(j.location),
} }
err = j.rdb.RecordSchedulerEnqueueEvent(j.id.String(), event) err = j.rdb.RecordSchedulerEnqueueEvent(j.id.String(), event)
if err != nil { if err != nil {
@@ -154,55 +161,77 @@ func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entry
if err != nil { if err != nil {
return "", err return "", err
} }
s.mu.Lock()
s.idmap[job.id.String()] = cronID s.idmap[job.id.String()] = cronID
s.mu.Unlock()
return job.id.String(), nil return job.id.String(), nil
} }
// Unregister removes a registered entry by entry ID. // Unregister removes a registered entry by entry ID.
// Unregister returns a non-nil error if no entries were found for the given entryID. // Unregister returns a non-nil error if no entries were found for the given entryID.
func (s *Scheduler) Unregister(entryID string) error { func (s *Scheduler) Unregister(entryID string) error {
s.mu.Lock()
defer s.mu.Unlock()
cronID, ok := s.idmap[entryID] cronID, ok := s.idmap[entryID]
if !ok { if !ok {
return fmt.Errorf("asynq: no scheduler entry found") return fmt.Errorf("asynq: no scheduler entry found")
} }
delete(s.idmap, entryID)
s.cron.Remove(cronID) s.cron.Remove(cronID)
return nil return nil
} }
// Run starts the scheduler until an os signal to exit the program is received. // Run starts the scheduler until an os signal to exit the program is received.
// It returns an error if scheduler is already running or has been stopped. // It returns an error if scheduler is already running or has been shutdown.
func (s *Scheduler) Run() error { func (s *Scheduler) Run() error {
if err := s.Start(); err != nil { if err := s.Start(); err != nil {
return err return err
} }
s.waitForSignals() s.waitForSignals()
return s.Stop() s.Shutdown()
return nil
} }
// Start starts the scheduler. // Start starts the scheduler.
// It returns an error if the scheduler is already running or has been stopped. // It returns an error if the scheduler is already running or has been shutdown.
func (s *Scheduler) Start() error { func (s *Scheduler) Start() error {
switch s.status.Get() { if err := s.start(); err != nil {
case base.StatusRunning: return err
return fmt.Errorf("asynq: the scheduler is already running")
case base.StatusStopped:
return fmt.Errorf("asynq: the scheduler has already been stopped")
} }
s.logger.Info("Scheduler starting") s.logger.Info("Scheduler starting")
s.logger.Infof("Scheduler timezone is set to %v", s.location) s.logger.Infof("Scheduler timezone is set to %v", s.location)
s.cron.Start() s.cron.Start()
s.wg.Add(1) s.wg.Add(1)
go s.runHeartbeater() go s.runHeartbeater()
s.status.Set(base.StatusRunning)
return nil return nil
} }
// Stop stops the scheduler. // Checks server state and returns an error if pre-condition is not met.
// It returns an error if the scheduler is not currently running. // Otherwise it sets the server state to active.
func (s *Scheduler) Stop() error { func (s *Scheduler) start() error {
if s.status.Get() != base.StatusRunning { s.state.mu.Lock()
return fmt.Errorf("asynq: the scheduler is not running") defer s.state.mu.Unlock()
switch s.state.value {
case srvStateActive:
return fmt.Errorf("asynq: the scheduler is already running")
case srvStateClosed:
return fmt.Errorf("asynq: the scheduler has already been stopped")
} }
s.state.value = srvStateActive
return nil
}
// Shutdown stops and shuts down the scheduler.
func (s *Scheduler) Shutdown() {
s.state.mu.Lock()
if s.state.value == srvStateNew || s.state.value == srvStateClosed {
// scheduler is not running, do nothing and return.
s.state.mu.Unlock()
return
}
s.state.value = srvStateClosed
s.state.mu.Unlock()
s.logger.Info("Scheduler shutting down") s.logger.Info("Scheduler shutting down")
close(s.done) // signal heartbeater to stop close(s.done) // signal heartbeater to stop
ctx := s.cron.Stop() ctx := s.cron.Stop()
@@ -212,9 +241,7 @@ func (s *Scheduler) Stop() error {
s.clearHistory() s.clearHistory()
s.client.Close() s.client.Close()
s.rdb.Close() s.rdb.Close()
s.status.Set(base.StatusStopped)
s.logger.Info("Scheduler stopped") s.logger.Info("Scheduler stopped")
return nil
} }
func (s *Scheduler) runHeartbeater() { func (s *Scheduler) runHeartbeater() {
@@ -240,8 +267,8 @@ func (s *Scheduler) beat() {
e := &base.SchedulerEntry{ e := &base.SchedulerEntry{
ID: job.id.String(), ID: job.id.String(),
Spec: job.cronspec, Spec: job.cronspec,
Type: job.task.Type, Type: job.task.Type(),
Payload: job.task.Payload.data, Payload: job.task.Payload(),
Opts: stringifyOptions(job.opts), Opts: stringifyOptions(job.opts),
Next: entry.Next, Next: entry.Next,
Prev: entry.Prev, Prev: entry.Prev,
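The scheduler changes above add mutex protection around idmap and replace the Stop error path with a Start/Shutdown lifecycle. A minimal usage sketch under those assumptions follows; the cronspec, task type, and Redis address are illustrative, not values taken from this change set.

package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: "127.0.0.1:6379"}, // assumed local Redis
		nil, // nil uses default SchedulerOpts
	)

	// Register is safe for concurrent use now that idmap is guarded by a mutex.
	entryID, err := scheduler.Register("@every 30s", asynq.NewTask("report:generate", nil))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("registered entry %s", entryID)

	// Run blocks until an exit signal arrives, then calls Shutdown internally.
	if err := scheduler.Run(); err != nil {
		log.Fatal(err)
	}
}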

View File

@@ -67,9 +67,7 @@ func TestSchedulerRegister(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
time.Sleep(tc.wait) time.Sleep(tc.wait)
if err := scheduler.Stop(); err != nil { scheduler.Shutdown()
t.Fatal(err)
}
got := asynqtest.GetPendingMessages(t, r, tc.queue) got := asynqtest.GetPendingMessages(t, r, tc.queue)
if diff := cmp.Diff(tc.want, got, asynqtest.IgnoreIDOpt); diff != "" { if diff := cmp.Diff(tc.want, got, asynqtest.IgnoreIDOpt); diff != "" {
@@ -106,9 +104,7 @@ func TestSchedulerWhenRedisDown(t *testing.T) {
} }
// Scheduler should attempt to enqueue the task three times (every 3s). // Scheduler should attempt to enqueue the task three times (every 3s).
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
if err := scheduler.Stop(); err != nil { scheduler.Shutdown()
t.Fatal(err)
}
mu.Lock() mu.Lock()
if counter != 3 { if counter != 3 {
@@ -150,9 +146,7 @@ func TestSchedulerUnregister(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
time.Sleep(tc.wait) time.Sleep(tc.wait)
if err := scheduler.Stop(); err != nil { scheduler.Shutdown()
t.Fatal(err)
}
got := asynqtest.GetPendingMessages(t, r, tc.queue) got := asynqtest.GetPendingMessages(t, r, tc.queue)
if len(got) != 0 { if len(got) != 0 {

View File

@@ -62,7 +62,7 @@ func (mux *ServeMux) Handler(t *Task) (h Handler, pattern string) {
mux.mu.RLock() mux.mu.RLock()
defer mux.mu.RUnlock() defer mux.mu.RUnlock()
h, pattern = mux.match(t.Type) h, pattern = mux.match(t.Type())
if h == nil { if h == nil {
h, pattern = NotFoundHandler(), "" h, pattern = NotFoundHandler(), ""
} }
@@ -98,7 +98,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
mux.mu.Lock() mux.mu.Lock()
defer mux.mu.Unlock() defer mux.mu.Unlock()
if pattern == "" { if strings.TrimSpace(pattern) == "" {
panic("asynq: invalid pattern") panic("asynq: invalid pattern")
} }
if handler == nil { if handler == nil {
@@ -151,7 +151,7 @@ func (mux *ServeMux) Use(mws ...MiddlewareFunc) {
// NotFound returns an error indicating that the handler was not found for the given task. // NotFound returns an error indicating that the handler was not found for the given task.
func NotFound(ctx context.Context, task *Task) error { func NotFound(ctx context.Context, task *Task) error {
return fmt.Errorf("handler not found for task %q", task.Type) return fmt.Errorf("handler not found for task %q", task.Type())
} }
// NotFoundHandler returns a simple task handler that returns a ``not found`` error. // NotFoundHandler returns a simple task handler that returns a ``not found`` error.
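With the ServeMux now routing on task.Type() and rejecting whitespace-only patterns, a typical mux looks like the sketch below. The "email:send" task type and the logging middleware are illustrative assumptions.

package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func loggingMiddleware(next asynq.Handler) asynq.Handler {
	return asynq.HandlerFunc(func(ctx context.Context, t *asynq.Task) error {
		log.Printf("start %q", t.Type())
		defer log.Printf("done %q", t.Type())
		return next.ProcessTask(ctx, t)
	})
}

func main() {
	mux := asynq.NewServeMux()
	mux.Use(loggingMiddleware)
	mux.HandleFunc("email:send", func(ctx context.Context, t *asynq.Task) error {
		// Payload() returns the raw bytes passed to NewTask in this release.
		log.Printf("sending email, payload=%s", t.Payload())
		return nil
	})
	// mux.Handle("   ", h) would now panic: blank patterns are invalid.
	_ = mux // pass mux to (*asynq.Server).Run in a real program
}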

View File

@@ -68,7 +68,7 @@ func TestServeMux(t *testing.T) {
} }
if called != tc.want { if called != tc.want {
t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type, tc.want) t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type(), tc.want)
} }
} }
} }
@@ -124,7 +124,7 @@ func TestServeMuxNotFound(t *testing.T) {
task := NewTask(tc.typename, nil) task := NewTask(tc.typename, nil)
err := mux.ProcessTask(context.Background(), task) err := mux.ProcessTask(context.Background(), task)
if err == nil { if err == nil {
t.Errorf("ProcessTask did not return error for task %q, should return 'not found' error", task.Type) t.Errorf("ProcessTask did not return error for task %q, should return 'not found' error", task.Type())
} }
} }
} }
@@ -164,7 +164,7 @@ func TestServeMuxMiddlewares(t *testing.T) {
} }
if called != tc.want { if called != tc.want {
t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type, tc.want) t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type(), tc.want)
} }
} }
} }

225
server.go
View File

@@ -15,29 +15,30 @@ import (
"sync" "sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
) )
// Server is responsible for managing the task processing. // Server is responsible for task processing and task lifecycle management.
// //
// Server pulls tasks off queues and processes them. // Server pulls tasks off queues and processes them.
// If the processing of a task is unsuccessful, server will schedule it for a retry. // If the processing of a task is unsuccessful, server will schedule it for a retry.
//
// A task will be retried until either the task gets processed successfully // A task will be retried until either the task gets processed successfully
// or until it reaches its max retry count. // or until it reaches its max retry count.
// //
// If a task exhausts its retries, it will be moved to the archive and // If a task exhausts its retries, it will be moved to the archive and
// will be kept in the archive for some time until a certain condition is met // will be kept in the archive set.
// (e.g., archive size reaches a certain limit, or the task has been in the // Note that the archive size is finite and once it reaches its max size,
// archive for a certain amount of time). // oldest tasks in the archive will be deleted.
type Server struct { type Server struct {
logger *log.Logger logger *log.Logger
broker base.Broker broker base.Broker
status *base.ServerStatus state *serverState
// wait group to wait for all goroutines to finish. // wait group to wait for all goroutines to finish.
wg sync.WaitGroup wg sync.WaitGroup
@@ -48,6 +49,44 @@ type Server struct {
subscriber *subscriber subscriber *subscriber
recoverer *recoverer recoverer *recoverer
healthchecker *healthchecker healthchecker *healthchecker
janitor *janitor
}
type serverState struct {
mu sync.Mutex
value serverStateValue
}
type serverStateValue int
const (
// StateNew represents a new server. Server begins in
// this state and then transition to StatusActive when
// Start or Run is called.
srvStateNew serverStateValue = iota
// StateActive indicates the server is up and active.
srvStateActive
// StateStopped indicates the server is up but no longer processing new tasks.
srvStateStopped
// StateClosed indicates the server has been shutdown.
srvStateClosed
)
var serverStates = []string{
"new",
"active",
"stopped",
"closed",
}
func (s serverStateValue) String() string {
if srvStateNew <= s && s <= srvStateClosed {
return serverStates[s]
}
return "unknown status"
} }
// Config specifies the server's background-task processing behavior. // Config specifies the server's background-task processing behavior.
@@ -55,14 +94,28 @@ type Config struct {
// Maximum number of concurrent processing of tasks. // Maximum number of concurrent processing of tasks.
// //
// If set to a zero or negative value, NewServer will overwrite the value // If set to a zero or negative value, NewServer will overwrite the value
// to the number of CPUs usable by the currennt process. // to the number of CPUs usable by the current process.
Concurrency int Concurrency int
// BaseContext optionally specifies a function that returns the base context for Handler invocations on this server.
//
// If BaseContext is nil, the default is context.Background().
// If this is defined, then it MUST return a non-nil context
BaseContext func() context.Context
// Function to calculate retry delay for a failed task. // Function to calculate retry delay for a failed task.
// //
// By default, it uses exponential backoff algorithm to calculate the delay. // By default, it uses exponential backoff algorithm to calculate the delay.
RetryDelayFunc RetryDelayFunc RetryDelayFunc RetryDelayFunc
// Predicate function to determine whether the error returned from Handler is a failure.
// If the function returns false, Server will not increment the retried counter for the task,
// and Server won't record the queue stats (processed and failed stats) to avoid skewing the error
// rate of the queue.
//
// By default, if the given error is non-nil the function returns true.
IsFailure func(error) bool
// List of queues to process with given priority value. Keys are the names of the // List of queues to process with given priority value. Keys are the names of the
// queues and values are associated priority value. // queues and values are associated priority value.
// //
@@ -134,6 +187,12 @@ type Config struct {
// //
// If unset or zero, the interval is set to 15 seconds. // If unset or zero, the interval is set to 15 seconds.
HealthCheckInterval time.Duration HealthCheckInterval time.Duration
// DelayedTaskCheckInterval specifies the interval between checks run on 'scheduled' and 'retry'
// tasks, and forwarding them to 'pending' state if they are ready to be processed.
//
// If unset or zero, the interval is set to 5 seconds.
DelayedTaskCheckInterval time.Duration
} }
// An ErrorHandler handles an error that occurred during task processing. // An ErrorHandler handles an error that occurred during task processing.
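The Config additions above (BaseContext, IsFailure, DelayedTaskCheckInterval) are exercised in the sketch below. The queue names, the sentinel error, and the Redis address are illustrative assumptions; only the field semantics come from the change set.

package main

import (
	"context"
	"errors"
	"time"

	"github.com/hibiken/asynq"
)

// errSkipStats is a hypothetical sentinel for outcomes we do not want counted
// as failures in the queue's processed/failed stats.
var errSkipStats = errors.New("not a real failure")

func newServer() *asynq.Server {
	return asynq.NewServer(
		asynq.RedisClientOpt{Addr: "127.0.0.1:6379"}, // assumed local Redis
		asynq.Config{
			Concurrency: 20,
			// BaseContext seeds every handler invocation; it must return a non-nil context.
			BaseContext: func() context.Context { return context.Background() },
			// IsFailure decides whether an error increments the retried counter and
			// the failed stats; the default treats any non-nil error as a failure.
			IsFailure: func(err error) bool { return !errors.Is(err, errSkipStats) },
			// Scheduled and retry tasks are checked for readiness at this interval
			// (5 seconds by default, per defaultDelayedTaskCheckInterval).
			DelayedTaskCheckInterval: 3 * time.Second,
			Queues: map[string]int{
				"critical": 6,
				"default":  3,
				"low":      1,
			},
		},
	)
}

func main() { _ = newServer() }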
@@ -267,6 +326,8 @@ func DefaultRetryDelayFunc(n int, e error, t *Task) time.Duration {
return time.Duration(s) * time.Second return time.Duration(s) * time.Second
} }
func defaultIsFailureFunc(err error) bool { return err != nil }
var defaultQueueConfig = map[string]int{ var defaultQueueConfig = map[string]int{
base.DefaultQueueName: 1, base.DefaultQueueName: 1,
} }
@@ -275,15 +336,21 @@ const (
defaultShutdownTimeout = 8 * time.Second defaultShutdownTimeout = 8 * time.Second
defaultHealthCheckInterval = 15 * time.Second defaultHealthCheckInterval = 15 * time.Second
defaultDelayedTaskCheckInterval = 5 * time.Second
) )
// NewServer returns a new Server given a redis connection option // NewServer returns a new Server given a redis connection option
// and background processing configuration. // and server configuration.
func NewServer(r RedisConnOpt, cfg Config) *Server { func NewServer(r RedisConnOpt, cfg Config) *Server {
c, ok := r.MakeRedisClient().(redis.UniversalClient) c, ok := r.MakeRedisClient().(redis.UniversalClient)
if !ok { if !ok {
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r)) panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
} }
baseCtxFn := cfg.BaseContext
if baseCtxFn == nil {
baseCtxFn = context.Background
}
n := cfg.Concurrency n := cfg.Concurrency
if n < 1 { if n < 1 {
n = runtime.NumCPU() n = runtime.NumCPU()
@@ -292,8 +359,15 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
if delayFunc == nil { if delayFunc == nil {
delayFunc = DefaultRetryDelayFunc delayFunc = DefaultRetryDelayFunc
} }
isFailureFunc := cfg.IsFailure
if isFailureFunc == nil {
isFailureFunc = defaultIsFailureFunc
}
queues := make(map[string]int) queues := make(map[string]int)
for qname, p := range cfg.Queues { for qname, p := range cfg.Queues {
if err := base.ValidateQueueName(qname); err != nil {
continue // ignore invalid queue names
}
if p > 0 { if p > 0 {
queues[qname] = p queues[qname] = p
} }
@@ -324,7 +398,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
starting := make(chan *workerInfo) starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage) finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest) syncCh := make(chan *syncRequest)
status := base.NewServerStatus(base.StatusIdle) srvState := &serverState{value: srvStateNew}
cancels := base.NewCancelations() cancels := base.NewCancelations()
syncer := newSyncer(syncerParams{ syncer := newSyncer(syncerParams{
@@ -339,15 +413,19 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
concurrency: n, concurrency: n,
queues: queues, queues: queues,
strictPriority: cfg.StrictPriority, strictPriority: cfg.StrictPriority,
status: status, state: srvState,
starting: starting, starting: starting,
finished: finished, finished: finished,
}) })
delayedTaskCheckInterval := cfg.DelayedTaskCheckInterval
if delayedTaskCheckInterval == 0 {
delayedTaskCheckInterval = defaultDelayedTaskCheckInterval
}
forwarder := newForwarder(forwarderParams{ forwarder := newForwarder(forwarderParams{
logger: logger, logger: logger,
broker: rdb, broker: rdb,
queues: qnames, queues: qnames,
interval: 5 * time.Second, interval: delayedTaskCheckInterval,
}) })
subscriber := newSubscriber(subscriberParams{ subscriber := newSubscriber(subscriberParams{
logger: logger, logger: logger,
@@ -358,6 +436,8 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
logger: logger, logger: logger,
broker: rdb, broker: rdb,
retryDelayFunc: delayFunc, retryDelayFunc: delayFunc,
baseCtxFn: baseCtxFn,
isFailureFunc: isFailureFunc,
syncCh: syncCh, syncCh: syncCh,
cancelations: cancels, cancelations: cancels,
concurrency: n, concurrency: n,
@@ -372,6 +452,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
logger: logger, logger: logger,
broker: rdb, broker: rdb,
retryDelayFunc: delayFunc, retryDelayFunc: delayFunc,
isFailureFunc: isFailureFunc,
queues: qnames, queues: qnames,
interval: 1 * time.Minute, interval: 1 * time.Minute,
}) })
@@ -381,10 +462,16 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
interval: healthcheckInterval, interval: healthcheckInterval,
healthcheckFunc: cfg.HealthCheckFunc, healthcheckFunc: cfg.HealthCheckFunc,
}) })
janitor := newJanitor(janitorParams{
logger: logger,
broker: rdb,
queues: qnames,
interval: 8 * time.Second,
})
return &Server{ return &Server{
logger: logger, logger: logger,
broker: rdb, broker: rdb,
status: status, state: srvState,
forwarder: forwarder, forwarder: forwarder,
processor: processor, processor: processor,
syncer: syncer, syncer: syncer,
@@ -392,6 +479,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
subscriber: subscriber, subscriber: subscriber,
recoverer: recoverer, recoverer: recoverer,
healthchecker: healthchecker, healthchecker: healthchecker,
janitor: janitor,
} }
} }
@@ -400,11 +488,13 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
// ProcessTask should return nil if the processing of a task // ProcessTask should return nil if the processing of a task
// is successful. // is successful.
// //
// If ProcessTask return a non-nil error or panics, the task // If ProcessTask returns a non-nil error or panics, the task
// will be retried after delay. // will be retried after delay if retry-count is remaining,
// One exception to this rule is when ProcessTask returns SkipRetry error. // otherwise the task will be archived.
// If the returned error is SkipRetry or the error wraps SkipRetry, retry is //
// skipped and task will be archived instead. // One exception to this rule is when ProcessTask returns a SkipRetry error.
// If the returned error is SkipRetry or an error wraps SkipRetry, retry is
// skipped and the task will be immediately archived instead.
type Handler interface { type Handler interface {
ProcessTask(context.Context, *Task) error ProcessTask(context.Context, *Task) error
} }
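The updated Handler documentation spells out the SkipRetry behavior: returning SkipRetry, or an error wrapping it, archives the task immediately instead of retrying. A minimal handler sketch follows; the task type and validation rule are illustrative assumptions.

package main

import (
	"context"
	"fmt"

	"github.com/hibiken/asynq"
)

func handleImageResize(ctx context.Context, t *asynq.Task) error {
	if len(t.Payload()) == 0 {
		// Retrying cannot fix a malformed payload, so skip retries; the error
		// still reaches any configured ErrorHandler before the task is archived.
		return fmt.Errorf("empty payload for %q: %w", t.Type(), asynq.SkipRetry)
	}
	// ... resize the image ...
	return nil
}

func main() {
	mux := asynq.NewServeMux()
	mux.HandleFunc("image:resize", handleImageResize)
	_ = mux // pass mux to (*asynq.Server).Run in a real program
}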
@@ -420,45 +510,42 @@ func (fn HandlerFunc) ProcessTask(ctx context.Context, task *Task) error {
return fn(ctx, task) return fn(ctx, task)
} }
// ErrServerStopped indicates that the operation is now illegal because of the server being stopped. // ErrServerClosed indicates that the operation is now illegal because the server has been shut down.
var ErrServerStopped = errors.New("asynq: the server has been stopped") var ErrServerClosed = errors.New("asynq: Server closed")
// Run starts the background-task processing and blocks until // Run starts the task processing and blocks until
// an os signal to exit the program is received. Once it receives // an os signal to exit the program is received. Once it receives
// a signal, it gracefully shuts down all active workers and other // a signal, it gracefully shuts down all active workers and other
// goroutines to process the tasks. // goroutines to process the tasks.
// //
// Run returns any error encountered during server startup time. // Run returns any error encountered at server startup time.
// If the server has already been stopped, ErrServerStopped is returned. // If the server has already been shutdown, ErrServerClosed is returned.
func (srv *Server) Run(handler Handler) error { func (srv *Server) Run(handler Handler) error {
if err := srv.Start(handler); err != nil { if err := srv.Start(handler); err != nil {
return err return err
} }
srv.waitForSignals() srv.waitForSignals()
srv.Stop() srv.Shutdown()
return nil return nil
} }
// Start starts the worker server. Once the server has started, // Start starts the worker server. Once the server has started,
// it pulls tasks off queues and starts a worker goroutine for each task. // it pulls tasks off queues and starts a worker goroutine for each task
// Tasks are processed concurrently by the workers up to the number of // and then call Handler to process it.
// concurrency specified at the initialization time. // Tasks are processed concurrently by the workers up to the number of
// concurrency specified in Config.Concurrency.
// //
// Start returns any error encountered during server startup time. // Start returns any error encountered at server startup time.
// If the server has already been stopped, ErrServerStopped is returned. // If the server has already been shutdown, ErrServerClosed is returned.
func (srv *Server) Start(handler Handler) error { func (srv *Server) Start(handler Handler) error {
if handler == nil { if handler == nil {
return fmt.Errorf("asynq: server cannot run with nil handler") return fmt.Errorf("asynq: server cannot run with nil handler")
} }
switch srv.status.Get() {
case base.StatusRunning:
return fmt.Errorf("asynq: the server is already running")
case base.StatusStopped:
return ErrServerStopped
}
srv.status.Set(base.StatusRunning)
srv.processor.handler = handler srv.processor.handler = handler
if err := srv.start(); err != nil {
return err
}
srv.logger.Info("Starting processing") srv.logger.Info("Starting processing")
srv.heartbeater.start(&srv.wg) srv.heartbeater.start(&srv.wg)
@@ -468,46 +555,76 @@ func (srv *Server) Start(handler Handler) error {
srv.recoverer.start(&srv.wg) srv.recoverer.start(&srv.wg)
srv.forwarder.start(&srv.wg) srv.forwarder.start(&srv.wg)
srv.processor.start(&srv.wg) srv.processor.start(&srv.wg)
srv.janitor.start(&srv.wg)
return nil return nil
} }
// Stop stops the worker server. // Checks server state and returns an error if pre-condition is not met.
// Otherwise it sets the server state to active.
func (srv *Server) start() error {
srv.state.mu.Lock()
defer srv.state.mu.Unlock()
switch srv.state.value {
case srvStateActive:
return fmt.Errorf("asynq: the server is already running")
case srvStateStopped:
return fmt.Errorf("asynq: the server is in the stopped state. Waiting for shutdown.")
case srvStateClosed:
return ErrServerClosed
}
srv.state.value = srvStateActive
return nil
}
// Shutdown gracefully shuts down the server.
// It gracefully closes all active workers. The server will wait for // It gracefully closes all active workers. The server will wait for
// active workers to finish processing tasks for the duration specified in Config.ShutdownTimeout. // active workers to finish processing tasks for the duration specified in Config.ShutdownTimeout.
// If a worker doesn't finish processing a task within the timeout, the task will be pushed back to Redis. // If a worker doesn't finish processing a task within the timeout, the task will be pushed back to Redis.
func (srv *Server) Stop() { func (srv *Server) Shutdown() {
switch srv.status.Get() { srv.state.mu.Lock()
case base.StatusIdle, base.StatusStopped: if srv.state.value == srvStateNew || srv.state.value == srvStateClosed {
srv.state.mu.Unlock()
// server is not running, do nothing and return. // server is not running, do nothing and return.
return return
} }
srv.state.value = srvStateClosed
srv.state.mu.Unlock()
srv.logger.Info("Starting graceful shutdown") srv.logger.Info("Starting graceful shutdown")
// Note: The order of termination is important. // Note: The order of shutdown is important.
// Sender goroutines should be terminated before the receiver goroutines. // Sender goroutines should be terminated before the receiver goroutines.
// processor -> syncer (via syncCh) // processor -> syncer (via syncCh)
// processor -> heartbeater (via starting, finished channels) // processor -> heartbeater (via starting, finished channels)
srv.forwarder.terminate() srv.forwarder.shutdown()
srv.processor.terminate() srv.processor.shutdown()
srv.recoverer.terminate() srv.recoverer.shutdown()
srv.syncer.terminate() srv.syncer.shutdown()
srv.subscriber.terminate() srv.subscriber.shutdown()
srv.healthchecker.terminate() srv.janitor.shutdown()
srv.heartbeater.terminate() srv.healthchecker.shutdown()
srv.heartbeater.shutdown()
srv.wg.Wait() srv.wg.Wait()
srv.broker.Close() srv.broker.Close()
srv.status.Set(base.StatusStopped)
srv.logger.Info("Exiting") srv.logger.Info("Exiting")
} }
// Quiet signals the server to stop pulling new tasks off queues. // Stop signals the server to stop pulling new tasks off queues.
// Quiet should be used before stopping the server. // Stop can be used before shutting down the server to ensure that all
func (srv *Server) Quiet() { // currently active tasks are processed before server shutdown.
//
// Stop does not shut down the server; make sure to call Shutdown before exiting.
func (srv *Server) Stop() {
srv.state.mu.Lock()
if srv.state.value != srvStateActive {
// Invalid call to Stop; the server can only go from the active state to the stopped state.
srv.state.mu.Unlock()
return
}
srv.state.value = srvStateStopped
srv.state.mu.Unlock()
srv.logger.Info("Stopping processor") srv.logger.Info("Stopping processor")
srv.processor.stop() srv.processor.stop()
srv.status.Set(base.StatusQuiet)
srv.logger.Info("Processor stopped") srv.logger.Info("Processor stopped")
} }
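A hedged sketch of the Stop-then-Shutdown sequence described in the comments above, assuming srv is a started *asynq.Server as in the earlier example:
// Stop intake first so workers can drain in-flight tasks, then shut down.
srv.Stop()     // server moves from the active state to the stopped state
srv.Shutdown() // waits up to Config.ShutdownTimeout for active tasks, then exits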


@@ -11,6 +11,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
"github.com/hibiken/asynq/internal/testbroker" "github.com/hibiken/asynq/internal/testbroker"
"go.uber.org/goleak" "go.uber.org/goleak"
@@ -18,7 +19,7 @@ import (
func TestServer(t *testing.T) { func TestServer(t *testing.T) {
// https://github.com/go-redis/redis/issues/1029 // https://github.com/go-redis/redis/issues/1029
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper") ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
defer goleak.VerifyNoLeaks(t, ignoreOpt) defer goleak.VerifyNoLeaks(t, ignoreOpt)
redisConnOpt := getRedisConnOpt(t) redisConnOpt := getRedisConnOpt(t)
@@ -39,22 +40,22 @@ func TestServer(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
_, err = c.Enqueue(NewTask("send_email", map[string]interface{}{"recipient_id": 123})) _, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 123})))
if err != nil { if err != nil {
t.Errorf("could not enqueue a task: %v", err) t.Errorf("could not enqueue a task: %v", err)
} }
_, err = c.Enqueue(NewTask("send_email", map[string]interface{}{"recipient_id": 456}), ProcessIn(1*time.Hour)) _, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 456})), ProcessIn(1*time.Hour))
if err != nil { if err != nil {
t.Errorf("could not enqueue a task: %v", err) t.Errorf("could not enqueue a task: %v", err)
} }
srv.Stop() srv.Shutdown()
} }
func TestServerRun(t *testing.T) { func TestServerRun(t *testing.T) {
// https://github.com/go-redis/redis/issues/1029 // https://github.com/go-redis/redis/issues/1029
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper") ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
defer goleak.VerifyNoLeaks(t, ignoreOpt) defer goleak.VerifyNoLeaks(t, ignoreOpt)
srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel}) srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
@@ -81,16 +82,16 @@ func TestServerRun(t *testing.T) {
} }
} }
func TestServerErrServerStopped(t *testing.T) { func TestServerErrServerClosed(t *testing.T) {
srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel}) srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
handler := NewServeMux() handler := NewServeMux()
if err := srv.Start(handler); err != nil { if err := srv.Start(handler); err != nil {
t.Fatal(err) t.Fatal(err)
} }
srv.Stop() srv.Shutdown()
err := srv.Start(handler) err := srv.Start(handler)
if err != ErrServerStopped { if err != ErrServerClosed {
t.Errorf("Restarting server: (*Server).Start(handler) = %v, want ErrServerStopped error", err) t.Errorf("Restarting server: (*Server).Start(handler) = %v, want ErrServerClosed error", err)
} }
} }
@@ -99,7 +100,7 @@ func TestServerErrNilHandler(t *testing.T) {
err := srv.Start(nil) err := srv.Start(nil)
if err == nil { if err == nil {
t.Error("Starting server with nil handler: (*Server).Start(nil) did not return error") t.Error("Starting server with nil handler: (*Server).Start(nil) did not return error")
srv.Stop() srv.Shutdown()
} }
} }
@@ -113,7 +114,7 @@ func TestServerErrServerRunning(t *testing.T) {
if err == nil { if err == nil {
t.Error("Calling (*Server).Start(handler) on already running server did not return error") t.Error("Calling (*Server).Start(handler) on already running server did not return error")
} }
srv.Stop() srv.Shutdown()
} }
func TestServerWithRedisDown(t *testing.T) { func TestServerWithRedisDown(t *testing.T) {
@@ -145,7 +146,7 @@ func TestServerWithRedisDown(t *testing.T) {
time.Sleep(3 * time.Second) time.Sleep(3 * time.Second)
srv.Stop() srv.Shutdown()
} }
func TestServerWithFlakyBroker(t *testing.T) { func TestServerWithFlakyBroker(t *testing.T) {
@@ -169,8 +170,8 @@ func TestServerWithFlakyBroker(t *testing.T) {
h := func(ctx context.Context, task *Task) error { h := func(ctx context.Context, task *Task) error {
// force task retry. // force task retry.
if task.Type == "bad_task" { if task.Type() == "bad_task" {
return fmt.Errorf("could not process %q", task.Type) return fmt.Errorf("could not process %q", task.Type())
} }
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
return nil return nil
@@ -206,7 +207,7 @@ func TestServerWithFlakyBroker(t *testing.T) {
time.Sleep(3 * time.Second) time.Sleep(3 * time.Second)
srv.Stop() srv.Shutdown()
} }
func TestLogLevel(t *testing.T) { func TestLogLevel(t *testing.T) {


@@ -22,7 +22,7 @@ func (srv *Server) waitForSignals() {
for { for {
sig := <-sigs sig := <-sigs
if sig == unix.SIGTSTP { if sig == unix.SIGTSTP {
srv.Quiet() srv.Stop()
continue continue
} }
break break


@@ -8,7 +8,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
) )
@@ -43,7 +43,7 @@ func newSubscriber(params subscriberParams) *subscriber {
} }
} }
func (s *subscriber) terminate() { func (s *subscriber) shutdown() {
s.logger.Debug("Subscriber shutting down...") s.logger.Debug("Subscriber shutting down...")
// Signal the subscriber goroutine to stop. // Signal the subscriber goroutine to stop.
s.done <- struct{}{} s.done <- struct{}{}


@@ -46,7 +46,7 @@ func TestSubscriber(t *testing.T) {
}) })
var wg sync.WaitGroup var wg sync.WaitGroup
subscriber.start(&wg) subscriber.start(&wg)
defer subscriber.terminate() defer subscriber.shutdown()
// wait for subscriber to establish connection to pubsub channel // wait for subscriber to establish connection to pubsub channel
time.Sleep(time.Second) time.Sleep(time.Second)
@@ -91,7 +91,7 @@ func TestSubscriberWithRedisDown(t *testing.T) {
testBroker.Sleep() // simulate a situation where subscriber cannot connect to redis. testBroker.Sleep() // simulate a situation where subscriber cannot connect to redis.
var wg sync.WaitGroup var wg sync.WaitGroup
subscriber.start(&wg) subscriber.start(&wg)
defer subscriber.terminate() defer subscriber.shutdown()
time.Sleep(2 * time.Second) // subscriber should wait and retry connecting to redis. time.Sleep(2 * time.Second) // subscriber should wait and retry connecting to redis.


@@ -46,7 +46,7 @@ func newSyncer(params syncerParams) *syncer {
} }
} }
func (s *syncer) terminate() { func (s *syncer) shutdown() {
s.logger.Debug("Syncer shutting down...") s.logger.Debug("Syncer shutting down...")
// Signal the syncer goroutine to stop. // Signal the syncer goroutine to stop.
s.done <- struct{}{} s.done <- struct{}{}


@@ -5,6 +5,7 @@
package asynq package asynq
import ( import (
"context"
"fmt" "fmt"
"sync" "sync"
"testing" "testing"
@@ -35,13 +36,13 @@ func TestSyncer(t *testing.T) {
}) })
var wg sync.WaitGroup var wg sync.WaitGroup
syncer.start(&wg) syncer.start(&wg)
defer syncer.terminate() defer syncer.shutdown()
for _, msg := range inProgress { for _, msg := range inProgress {
m := msg m := msg
syncRequestCh <- &syncRequest{ syncRequestCh <- &syncRequest{
fn: func() error { fn: func() error {
return rdbClient.Done(m) return rdbClient.Done(context.Background(), m)
}, },
deadline: time.Now().Add(5 * time.Minute), deadline: time.Now().Add(5 * time.Minute),
} }
@@ -66,7 +67,7 @@ func TestSyncerRetry(t *testing.T) {
var wg sync.WaitGroup var wg sync.WaitGroup
syncer.start(&wg) syncer.start(&wg)
defer syncer.terminate() defer syncer.shutdown()
var ( var (
mu sync.Mutex mu sync.Mutex
@@ -131,7 +132,7 @@ func TestSyncerDropsStaleRequests(t *testing.T) {
} }
time.Sleep(2 * interval) // ensure that syncer runs at least once time.Sleep(2 * interval) // ensure that syncer runs at least once
syncer.terminate() syncer.shutdown()
mu.Lock() mu.Lock()
if n != 0 { if n != 0 {


@@ -11,7 +11,7 @@ import (
"sort" "sort"
"time" "time"
"github.com/hibiken/asynq/inspeq" "github.com/hibiken/asynq"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -63,7 +63,7 @@ func cronList(cmd *cobra.Command, args []string) {
cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"} cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
printRows := func(w io.Writer, tmpl string) { printRows := func(w io.Writer, tmpl string) {
for _, e := range entries { for _, e := range entries {
fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type, e.Task.Payload, e.Opts, fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), sprintBytes(e.Task.Payload()), e.Opts,
nextEnqueue(e.Next), prevEnqueue(e.Prev)) nextEnqueue(e.Next), prevEnqueue(e.Prev))
} }
} }
@@ -108,7 +108,7 @@ func cronHistory(cmd *cobra.Command, args []string) {
fmt.Printf("Entry: %s\n\n", entryID) fmt.Printf("Entry: %s\n\n", entryID)
events, err := inspector.ListSchedulerEnqueueEvents( events, err := inspector.ListSchedulerEnqueueEvents(
entryID, inspeq.PageSize(pageSize), inspeq.Page(pageNum)) entryID, asynq.PageSize(pageSize), asynq.Page(pageNum))
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
continue continue


@@ -1,389 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"encoding/json"
"fmt"
"os"
"strings"
"time"
"github.com/go-redis/redis/v7"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/spf13/cast"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var migrateCmd = &cobra.Command{
Use: "migrate",
Short: fmt.Sprintf("Migrate all tasks to be compatible with asynq v%s", base.Version),
Args: cobra.NoArgs,
Run: migrate,
}
func init() {
rootCmd.AddCommand(migrateCmd)
}
func migrate(cmd *cobra.Command, args []string) {
c := redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
})
r := createRDB()
/*** Migrate from 0.9 to 0.10, 0.11 compatible ***/
lists := []string{"asynq:in_progress"}
allQueues, err := c.SMembers(base.AllQueues).Result()
if err != nil {
printError(fmt.Errorf("could not read all queues: %v", err))
os.Exit(1)
}
lists = append(lists, allQueues...)
for _, key := range lists {
if err := migrateList(c, key); err != nil {
printError(err)
os.Exit(1)
}
}
zsets := []string{"asynq:scheduled", "asynq:retry", "asynq:dead"}
for _, key := range zsets {
if err := migrateZSet(c, key); err != nil {
printError(err)
os.Exit(1)
}
}
/*** Migrate from 0.11 to 0.12 compatible ***/
if err := createBackup(c, base.AllQueues); err != nil {
printError(err)
os.Exit(1)
}
for _, qkey := range allQueues {
qname := strings.TrimPrefix(qkey, "asynq:queues:")
if err := c.SAdd(base.AllQueues, qname).Err(); err != nil {
err = fmt.Errorf("could not add queue name %q to %q set: %v\n",
qname, base.AllQueues, err)
printError(err)
os.Exit(1)
}
}
if err := deleteBackup(c, base.AllQueues); err != nil {
printError(err)
os.Exit(1)
}
for _, qkey := range allQueues {
qname := strings.TrimPrefix(qkey, "asynq:queues:")
if exists := c.Exists(qkey).Val(); exists == 1 {
if err := c.Rename(qkey, base.QueueKey(qname)).Err(); err != nil {
printError(fmt.Errorf("could not rename key %q: %v\n", qkey, err))
os.Exit(1)
}
}
}
if err := partitionZSetMembersByQueue(c, "asynq:scheduled", base.ScheduledKey); err != nil {
printError(err)
os.Exit(1)
}
if err := partitionZSetMembersByQueue(c, "asynq:retry", base.RetryKey); err != nil {
printError(err)
os.Exit(1)
}
// Note: base.DeadKey function was renamed in v0.14. We define the legacy function here since we need it for this migration script.
deadKeyFunc := func(qname string) string { return fmt.Sprintf("asynq:{%s}:dead", qname) }
if err := partitionZSetMembersByQueue(c, "asynq:dead", deadKeyFunc); err != nil {
printError(err)
os.Exit(1)
}
if err := partitionZSetMembersByQueue(c, "asynq:deadlines", base.DeadlinesKey); err != nil {
printError(err)
os.Exit(1)
}
if err := partitionListMembersByQueue(c, "asynq:in_progress", base.ActiveKey); err != nil {
printError(err)
os.Exit(1)
}
paused, err := c.SMembers("asynq:paused").Result()
if err != nil {
printError(fmt.Errorf("command SMEMBERS asynq:paused failed: %v", err))
os.Exit(1)
}
for _, qkey := range paused {
qname := strings.TrimPrefix(qkey, "asynq:queues:")
if err := r.Pause(qname); err != nil {
printError(err)
os.Exit(1)
}
}
if err := deleteKey(c, "asynq:paused"); err != nil {
printError(err)
os.Exit(1)
}
if err := deleteKey(c, "asynq:servers"); err != nil {
printError(err)
os.Exit(1)
}
if err := deleteKey(c, "asynq:workers"); err != nil {
printError(err)
os.Exit(1)
}
/*** Migrate from 0.13 to 0.14 compatible ***/
// Move all dead tasks to archived ZSET.
for _, qname := range allQueues {
zs, err := c.ZRangeWithScores(deadKeyFunc(qname), 0, -1).Result()
if err != nil {
printError(err)
os.Exit(1)
}
for _, z := range zs {
if err := c.ZAdd(base.ArchivedKey(qname), &z).Err(); err != nil {
printError(err)
os.Exit(1)
}
}
if err := deleteKey(c, deadKeyFunc(qname)); err != nil {
printError(err)
os.Exit(1)
}
}
}
func backupKey(key string) string {
return fmt.Sprintf("%s:backup", key)
}
func createBackup(c *redis.Client, key string) error {
err := c.Rename(key, backupKey(key)).Err()
if err != nil {
return fmt.Errorf("could not rename key %q: %v", key, err)
}
return nil
}
func deleteBackup(c *redis.Client, key string) error {
return deleteKey(c, backupKey(key))
}
func deleteKey(c *redis.Client, key string) error {
exists := c.Exists(key).Val()
if exists == 0 {
// key does not exist
return nil
}
err := c.Del(key).Err()
if err != nil {
return fmt.Errorf("could not delete key %q: %v", key, err)
}
return nil
}
func printError(err error) {
fmt.Println(err)
fmt.Println()
fmt.Println("Migrate command error")
fmt.Println("Please file an issue on Github at https://github.com/hibiken/asynq/issues/new/choose")
}
func partitionZSetMembersByQueue(c *redis.Client, key string, newKeyFunc func(string) string) error {
zs, err := c.ZRangeWithScores(key, 0, -1).Result()
if err != nil {
return fmt.Errorf("command ZRANGE %s 0 -1 WITHSCORES failed: %v", key, err)
}
for _, z := range zs {
s := cast.ToString(z.Member)
msg, err := base.DecodeMessage(s)
if err != nil {
return fmt.Errorf("could not decode message from %q: %v", key, err)
}
if err := c.ZAdd(newKeyFunc(msg.Queue), &z).Err(); err != nil {
return fmt.Errorf("could not add %v to %q: %v", z, newKeyFunc(msg.Queue))
}
}
if err := deleteKey(c, key); err != nil {
return err
}
return nil
}
func partitionListMembersByQueue(c *redis.Client, key string, newKeyFunc func(string) string) error {
data, err := c.LRange(key, 0, -1).Result()
if err != nil {
return fmt.Errorf("command LRANGE %s 0 -1 failed: %v", key, err)
}
for _, s := range data {
msg, err := base.DecodeMessage(s)
if err != nil {
return fmt.Errorf("could not decode message from %q: %v", key, err)
}
if err := c.LPush(newKeyFunc(msg.Queue), s).Err(); err != nil {
return fmt.Errorf("could not add %v to %q: %v", s, newKeyFunc(msg.Queue))
}
}
if err := deleteKey(c, key); err != nil {
return err
}
return nil
}
type oldTaskMessage struct {
// Unchanged
Type string
Payload map[string]interface{}
ID uuid.UUID
Queue string
Retry int
Retried int
ErrorMsg string
UniqueKey string
// Following fields have changed.
// Timeout specifies the timeout duration for the task.
// The string should be parsable by time.ParseDuration.
//
// A zero duration means no timeout.
Timeout string
// Deadline specifies the deadline for the task.
// The task won't be processed if it has exceeded its deadline.
// The string should be in RFC3339 format.
//
// time.Time's zero value means no deadline.
Deadline string
}
var defaultTimeout = 30 * time.Minute
func convertMessage(old *oldTaskMessage) (*base.TaskMessage, error) {
timeout, err := time.ParseDuration(old.Timeout)
if err != nil {
return nil, fmt.Errorf("could not parse Timeout field of %+v", old)
}
deadline, err := time.Parse(time.RFC3339, old.Deadline)
if err != nil {
return nil, fmt.Errorf("could not parse Deadline field of %+v", old)
}
if timeout == 0 && deadline.IsZero() {
timeout = defaultTimeout
}
if deadline.IsZero() {
// Zero value used to be time.Time{},
// in the new schema zero value is represented by
// zero in Unix time.
deadline = time.Unix(0, 0)
}
return &base.TaskMessage{
Type: old.Type,
Payload: old.Payload,
ID: uuid.New(),
Queue: old.Queue,
Retry: old.Retry,
Retried: old.Retried,
ErrorMsg: old.ErrorMsg,
UniqueKey: old.UniqueKey,
Timeout: int64(timeout.Seconds()),
Deadline: deadline.Unix(),
}, nil
}
func deserialize(s string) (*base.TaskMessage, error) {
// Try deserializing as old message.
d := json.NewDecoder(strings.NewReader(s))
d.UseNumber()
var old *oldTaskMessage
if err := d.Decode(&old); err != nil {
// Try deserializing as new message.
d = json.NewDecoder(strings.NewReader(s))
d.UseNumber()
var msg *base.TaskMessage
if err := d.Decode(&msg); err != nil {
return nil, fmt.Errorf("could not deserialize %s into task message: %v", s, err)
}
return msg, nil
}
return convertMessage(old)
}
func migrateZSet(c *redis.Client, key string) error {
if c.Exists(key).Val() == 0 {
// skip if key doesn't exist.
return nil
}
res, err := c.ZRangeWithScores(key, 0, -1).Result()
if err != nil {
return err
}
var msgs []*redis.Z
for _, z := range res {
s, err := cast.ToStringE(z.Member)
if err != nil {
return fmt.Errorf("could not cast to string: %v", err)
}
msg, err := deserialize(s)
if err != nil {
return err
}
encoded, err := base.EncodeMessage(msg)
if err != nil {
return fmt.Errorf("could not encode message from %q: %v", key, err)
}
msgs = append(msgs, &redis.Z{Score: z.Score, Member: encoded})
}
if err := c.Rename(key, key+":backup").Err(); err != nil {
return fmt.Errorf("could not rename key %q: %v", key, err)
}
if err := c.ZAdd(key, msgs...).Err(); err != nil {
return fmt.Errorf("could not write new messages to %q: %v", key, err)
}
if err := c.Del(key + ":backup").Err(); err != nil {
return fmt.Errorf("could not delete back up key %q: %v", key+":backup", err)
}
return nil
}
func migrateList(c *redis.Client, key string) error {
if c.Exists(key).Val() == 0 {
// skip if key doesn't exist.
return nil
}
res, err := c.LRange(key, 0, -1).Result()
if err != nil {
return err
}
var msgs []interface{}
for _, s := range res {
msg, err := deserialize(s)
if err != nil {
return err
}
encoded, err := base.EncodeMessage(msg)
if err != nil {
return fmt.Errorf("could not encode message from %q: %v", key, err)
}
msgs = append(msgs, encoded)
}
if err := c.Rename(key, key+":backup").Err(); err != nil {
return fmt.Errorf("could not rename key %q: %v", key, err)
}
if err := c.LPush(key, msgs...).Err(); err != nil {
return fmt.Errorf("could not write new messages to %q: %v", key, err)
}
if err := c.Del(key + ":backup").Err(); err != nil {
return fmt.Errorf("could not delete back up key %q: %v", key+":backup", err)
}
return nil
}
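For reference, the removed script's deserialize/convertMessage path handled legacy messages like the one below; the field values are illustrative assumptions matching the oldTaskMessage schema it documents.
func exampleLegacyConversion() {
	legacy := `{"Type":"email:send","Payload":{"user_id":42},"Queue":"default","Retry":25,"Timeout":"30m","Deadline":"0001-01-01T00:00:00Z"}`
	msg, err := deserialize(legacy)
	if err != nil {
		fmt.Println(err)
		return
	}
	// "30m" becomes 1800 seconds; the zero RFC3339 deadline becomes Unix time 0.
	fmt.Println(msg.Timeout, msg.Deadline) // 1800 0
}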


@@ -10,8 +10,8 @@ import (
"os" "os"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/hibiken/asynq/inspeq" "github.com/hibiken/asynq"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/errors"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -82,7 +82,7 @@ func queueList(cmd *cobra.Command, args []string) {
type queueInfo struct { type queueInfo struct {
name string name string
keyslot int64 keyslot int64
nodes []inspeq.ClusterNode nodes []*asynq.ClusterNode
} }
inspector := createInspector() inspector := createInspector()
queues, err := inspector.Queues() queues, err := inspector.Queues()
@@ -90,7 +90,7 @@ func queueList(cmd *cobra.Command, args []string) {
fmt.Printf("error: Could not fetch list of queues: %v\n", err) fmt.Printf("error: Could not fetch list of queues: %v\n", err)
os.Exit(1) os.Exit(1)
} }
var qs []queueInfo var qs []*queueInfo
for _, qname := range queues { for _, qname := range queues {
q := queueInfo{name: qname} q := queueInfo{name: qname}
if useRedisCluster { if useRedisCluster {
@@ -107,7 +107,7 @@ func queueList(cmd *cobra.Command, args []string) {
} }
q.nodes = nodes q.nodes = nodes
} }
qs = append(qs, q) qs = append(qs, &q)
} }
if useRedisCluster { if useRedisCluster {
printTable( printTable(
@@ -129,43 +129,42 @@ func queueInspect(cmd *cobra.Command, args []string) {
inspector := createInspector() inspector := createInspector()
for i, qname := range args { for i, qname := range args {
if i > 0 { if i > 0 {
fmt.Printf("\n%s\n", separator) fmt.Printf("\n%s\n\n", separator)
} }
fmt.Println() info, err := inspector.GetQueueInfo(qname)
stats, err := inspector.CurrentStats(qname)
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
continue continue
} }
printQueueStats(stats) printQueueInfo(info)
} }
} }
func printQueueStats(s *inspeq.QueueStats) { func printQueueInfo(info *asynq.QueueInfo) {
bold := color.New(color.Bold) bold := color.New(color.Bold)
bold.Println("Queue Info") bold.Println("Queue Info")
fmt.Printf("Name: %s\n", s.Queue) fmt.Printf("Name: %s\n", info.Queue)
fmt.Printf("Size: %d\n", s.Size) fmt.Printf("Size: %d\n", info.Size)
fmt.Printf("Paused: %t\n\n", s.Paused) fmt.Printf("Paused: %t\n\n", info.Paused)
bold.Println("Task Count by State") bold.Println("Task Count by State")
printTable( printTable(
[]string{"active", "pending", "scheduled", "retry", "archived"}, []string{"active", "pending", "scheduled", "retry", "archived", "completed"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
fmt.Fprintf(w, tmpl, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived) fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived, info.Completed)
}, },
) )
fmt.Println() fmt.Println()
bold.Printf("Daily Stats %s UTC\n", s.Timestamp.UTC().Format("2006-01-02")) bold.Printf("Daily Stats %s UTC\n", info.Timestamp.UTC().Format("2006-01-02"))
printTable( printTable(
[]string{"processed", "failed", "error rate"}, []string{"processed", "failed", "error rate"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
var errRate string var errRate string
if s.Processed == 0 { if info.Processed == 0 {
errRate = "N/A" errRate = "N/A"
} else { } else {
errRate = fmt.Sprintf("%.2f%%", float64(s.Failed)/float64(s.Processed)*100) errRate = fmt.Sprintf("%.2f%%", float64(info.Failed)/float64(info.Processed)*100)
} }
fmt.Fprintf(w, tmpl, s.Processed, s.Failed, errRate) fmt.Fprintf(w, tmpl, info.Processed, info.Failed, errRate)
}, },
) )
} }
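As a usage sketch, the Inspector call that feeds printQueueInfo above could be driven directly as follows; the Redis address and queue name are illustrative.
insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
info, err := insp.GetQueueInfo("default")
if err != nil {
	fmt.Printf("error: %v\n", err)
	os.Exit(1)
}
printQueueInfo(info) // prints size, paused flag, per-state counts, and daily stats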
@@ -179,9 +178,9 @@ func queueHistory(cmd *cobra.Command, args []string) {
inspector := createInspector() inspector := createInspector()
for i, qname := range args { for i, qname := range args {
if i > 0 { if i > 0 {
fmt.Printf("\n%s\n", separator) fmt.Printf("\n%s\n\n", separator)
} }
fmt.Printf("\nQueue: %s\n\n", qname) fmt.Printf("Queue: %s\n\n", qname)
stats, err := inspector.History(qname, days) stats, err := inspector.History(qname, days)
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
@@ -191,7 +190,7 @@ func queueHistory(cmd *cobra.Command, args []string) {
} }
} }
func printDailyStats(stats []*inspeq.DailyStats) { func printDailyStats(stats []*asynq.DailyStats) {
printTable( printTable(
[]string{"date (UTC)", "processed", "failed", "error rate"}, []string{"date (UTC)", "processed", "failed", "error rate"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
@@ -244,7 +243,7 @@ func queueRemove(cmd *cobra.Command, args []string) {
for _, qname := range args { for _, qname := range args {
err = r.RemoveQueue(qname, force) err = r.RemoveQueue(qname, force)
if err != nil { if err != nil {
if _, ok := err.(*rdb.ErrQueueNotEmpty); ok { if errors.IsQueueNotEmpty(err) {
fmt.Printf("error: %v\nIf you are sure you want to delete it, run 'asynq queue rm --force %s'\n", err, qname) fmt.Printf("error: %v\nIf you are sure you want to delete it, run 'asynq queue rm --force %s'\n", err, qname)
continue continue
} }


@@ -11,10 +11,11 @@ import (
"os" "os"
"strings" "strings"
"text/tabwriter" "text/tabwriter"
"unicode"
"unicode/utf8"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/hibiken/asynq" "github.com/hibiken/asynq"
"github.com/hibiken/asynq/inspeq"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -136,24 +137,25 @@ func createRDB() *rdb.RDB {
} }
// createInspector creates an Inspector instance using flag values and returns it. // createInspector creates an Inspector instance using flag values and returns it.
func createInspector() *inspeq.Inspector { func createInspector() *asynq.Inspector {
var connOpt asynq.RedisConnOpt return asynq.NewInspector(getRedisConnOpt())
}
func getRedisConnOpt() asynq.RedisConnOpt {
if useRedisCluster { if useRedisCluster {
addrs := strings.Split(viper.GetString("cluster_addrs"), ",") addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
connOpt = asynq.RedisClusterClientOpt{ return asynq.RedisClusterClientOpt{
Addrs: addrs, Addrs: addrs,
Password: viper.GetString("password"), Password: viper.GetString("password"),
TLSConfig: getTLSConfig(), TLSConfig: getTLSConfig(),
} }
} else {
connOpt = asynq.RedisClientOpt{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
}
} }
return inspeq.New(connOpt) return asynq.RedisClientOpt{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
}
} }
func getTLSConfig() *tls.Config { func getTLSConfig() *tls.Config {
@@ -196,3 +198,28 @@ func printTable(cols []string, printRows func(w io.Writer, tmpl string)) {
printRows(tw, format) printRows(tw, format)
tw.Flush() tw.Flush()
} }
// sprintBytes returns a string representation of the given byte slice if the data is printable.
// If the data is not printable, it returns a string saying so instead.
func sprintBytes(payload []byte) string {
if !isPrintable(payload) {
return "non-printable bytes"
}
return string(payload)
}
func isPrintable(data []byte) bool {
if !utf8.Valid(data) {
return false
}
isAllSpace := true
for _, r := range string(data) {
if !unicode.IsPrint(r) {
return false
}
if !unicode.IsSpace(r) {
isAllSpace = false
}
}
return !isAllSpace
}
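A quick illustration of the two helpers above (the payload bytes are made up):
fmt.Println(sprintBytes([]byte(`{"user_id": 42}`))) // {"user_id": 42}
fmt.Println(sprintBytes([]byte{0xff, 0xfe}))        // non-printable bytes (invalid UTF-8)
fmt.Println(sprintBytes([]byte("   ")))             // non-printable bytes (all whitespace)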


@@ -35,11 +35,11 @@ The command shows the following for each server:
* Host and PID of the process in which the server is running * Host and PID of the process in which the server is running
* Number of active workers out of worker pool * Number of active workers out of worker pool
* Queue configuration * Queue configuration
* State of the worker server ("running" | "quiet") * State of the worker server ("active" | "stopped")
* Time the server was started * Time the server was started
A "running" server is pulling tasks from queues and processing them. A "active" server is pulling tasks from queues and processing them.
A "quiet" server is no longer pulling new tasks from queues`, A "stopped" server is no longer pulling new tasks from queues`,
Run: serverList, Run: serverList,
} }


@@ -5,13 +5,16 @@
package cmd package cmd
import ( import (
"encoding/json"
"fmt" "fmt"
"io" "io"
"math"
"os" "os"
"strconv" "strconv"
"strings" "strings"
"text/tabwriter" "text/tabwriter"
"time" "time"
"unicode/utf8"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
@@ -22,7 +25,7 @@ import (
var statsCmd = &cobra.Command{ var statsCmd = &cobra.Command{
Use: "stats", Use: "stats",
Short: "Shows current state of the tasks and queues", Short: "Shows current state of the tasks and queues",
Long: `Stats (aysnqmon stats) will show the overview of tasks and queues at that instant. Long: `Stats (asynq stats) will show the overview of tasks and queues at that instant.
Specifically, the command shows the following: Specifically, the command shows the following:
* Number of tasks in each state * Number of tasks in each state
@@ -38,8 +41,11 @@ Example: watch -n 3 asynq stats -> Shows current state of tasks every three seco
Run: stats, Run: stats,
} }
var jsonFlag bool
func init() { func init() {
rootCmd.AddCommand(statsCmd) rootCmd.AddCommand(statsCmd)
statsCmd.Flags().BoolVar(&jsonFlag, "json", false, "Output stats in JSON format.")
// Here you will define your flags and configuration settings. // Here you will define your flags and configuration settings.
@@ -53,14 +59,21 @@ func init() {
} }
type AggregateStats struct { type AggregateStats struct {
Active int Active int `json:"active"`
Pending int Pending int `json:"pending"`
Scheduled int Scheduled int `json:"scheduled"`
Retry int Retry int `json:"retry"`
Archived int Archived int `json:"archived"`
Processed int Completed int `json:"completed"`
Failed int Processed int `json:"processed"`
Timestamp time.Time Failed int `json:"failed"`
Timestamp time.Time `json:"timestamp"`
}
type FullStats struct {
Aggregate AggregateStats `json:"aggregate"`
QueueStats []*rdb.Stats `json:"queues"`
RedisInfo map[string]string `json:"redis"`
} }
func stats(cmd *cobra.Command, args []string) { func stats(cmd *cobra.Command, args []string) {
@@ -85,6 +98,7 @@ func stats(cmd *cobra.Command, args []string) {
aggStats.Scheduled += s.Scheduled aggStats.Scheduled += s.Scheduled
aggStats.Retry += s.Retry aggStats.Retry += s.Retry
aggStats.Archived += s.Archived aggStats.Archived += s.Archived
aggStats.Completed += s.Completed
aggStats.Processed += s.Processed aggStats.Processed += s.Processed
aggStats.Failed += s.Failed aggStats.Failed += s.Failed
aggStats.Timestamp = s.Timestamp aggStats.Timestamp = s.Timestamp
@@ -100,6 +114,23 @@ func stats(cmd *cobra.Command, args []string) {
fmt.Println(err) fmt.Println(err)
os.Exit(1) os.Exit(1)
} }
if jsonFlag {
statsJSON, err := json.Marshal(FullStats{
Aggregate: aggStats,
QueueStats: stats,
RedisInfo: info,
})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(string(statsJSON))
return
}
bold := color.New(color.Bold) bold := color.New(color.Bold)
bold.Println("Task Count by State") bold.Println("Task Count by State")
printStatsByState(&aggStats) printStatsByState(&aggStats)
@@ -124,22 +155,50 @@ func stats(cmd *cobra.Command, args []string) {
} }
func printStatsByState(s *AggregateStats) { func printStatsByState(s *AggregateStats) {
format := strings.Repeat("%v\t", 5) + "\n" format := strings.Repeat("%v\t", 6) + "\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0) tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived") fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived", "completed")
fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----") width := maxInt(9 /* defaultWidth */, maxWidthOf(s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)) // length of widest column
fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived) sep := strings.Repeat("-", width)
fmt.Fprintf(tw, format, sep, sep, sep, sep, sep, sep)
fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)
tw.Flush() tw.Flush()
} }
// numDigits returns the number of digits in n.
func numDigits(n int) int {
return len(strconv.Itoa(n))
}
// maxWidthOf returns the max number of digits among the provided vals.
func maxWidthOf(vals ...int) int {
max := 0
for _, v := range vals {
if vw := numDigits(v); vw > max {
max = vw
}
}
return max
}
func maxInt(a, b int) int {
return int(math.Max(float64(a), float64(b)))
}
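For example, with counts of 7, 123, and 0 the widest value needs 3 characters, so the separator row still uses the 9-character default; a small sketch of the width helpers above:
width := maxInt(9 /* defaultWidth */, maxWidthOf(7, 123, 0)) // 9
sep := strings.Repeat("-", width)                            // "---------"
fmt.Println(sep)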
func printStatsByQueue(stats []*rdb.Stats) { func printStatsByQueue(stats []*rdb.Stats) {
var headers, seps, counts []string var headers, seps, counts []string
maxHeaderWidth := 0
for _, s := range stats { for _, s := range stats {
title := queueTitle(s) title := queueTitle(s)
headers = append(headers, title) headers = append(headers, title)
seps = append(seps, strings.Repeat("-", len(title))) if w := utf8.RuneCountInString(title); w > maxHeaderWidth {
maxHeaderWidth = w
}
counts = append(counts, strconv.Itoa(s.Size)) counts = append(counts, strconv.Itoa(s.Size))
} }
for i := 0; i < len(headers); i++ {
seps = append(seps, strings.Repeat("-", maxHeaderWidth))
}
format := strings.Repeat("%v\t", len(headers)) + "\n" format := strings.Repeat("%v\t", len(headers)) + "\n"
tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0) tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
fmt.Fprintf(tw, format, toInterfaceSlice(headers)...) fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)


@@ -10,7 +10,8 @@ import (
"os" "os"
"time" "time"
"github.com/hibiken/asynq/inspeq" "github.com/fatih/color"
"github.com/hibiken/asynq"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -26,23 +27,29 @@ func init() {
taskCmd.AddCommand(taskCancelCmd) taskCmd.AddCommand(taskCancelCmd)
taskCmd.AddCommand(taskInspectCmd)
taskInspectCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
taskInspectCmd.Flags().StringP("id", "i", "", "id of the task")
taskInspectCmd.MarkFlagRequired("queue")
taskInspectCmd.MarkFlagRequired("id")
taskCmd.AddCommand(taskArchiveCmd) taskCmd.AddCommand(taskArchiveCmd)
taskArchiveCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs") taskArchiveCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
taskArchiveCmd.Flags().StringP("key", "k", "", "key of the task") taskArchiveCmd.Flags().StringP("id", "i", "", "id of the task")
taskArchiveCmd.MarkFlagRequired("queue") taskArchiveCmd.MarkFlagRequired("queue")
taskArchiveCmd.MarkFlagRequired("key") taskArchiveCmd.MarkFlagRequired("id")
taskCmd.AddCommand(taskDeleteCmd) taskCmd.AddCommand(taskDeleteCmd)
taskDeleteCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs") taskDeleteCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
taskDeleteCmd.Flags().StringP("key", "k", "", "key of the task") taskDeleteCmd.Flags().StringP("id", "i", "", "id of the task")
taskDeleteCmd.MarkFlagRequired("queue") taskDeleteCmd.MarkFlagRequired("queue")
taskDeleteCmd.MarkFlagRequired("key") taskDeleteCmd.MarkFlagRequired("id")
taskCmd.AddCommand(taskRunCmd) taskCmd.AddCommand(taskRunCmd)
taskRunCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs") taskRunCmd.Flags().StringP("queue", "q", "", "queue to which the task belongs")
taskRunCmd.Flags().StringP("key", "k", "", "key of the task") taskRunCmd.Flags().StringP("id", "i", "", "id of the task")
taskRunCmd.MarkFlagRequired("queue") taskRunCmd.MarkFlagRequired("queue")
taskRunCmd.MarkFlagRequired("key") taskRunCmd.MarkFlagRequired("id")
taskCmd.AddCommand(taskArchiveAllCmd) taskCmd.AddCommand(taskArchiveAllCmd)
taskArchiveAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong") taskArchiveAllCmd.Flags().StringP("queue", "q", "", "queue to which the tasks belong")
@@ -79,6 +86,7 @@ The value for the state flag should be one of:
- scheduled - scheduled
- retry - retry
- archived - archived
- completed
List operation paginates the result set. List operation paginates the result set.
By default, the command fetches the first 30 tasks. By default, the command fetches the first 30 tasks.
@@ -93,6 +101,13 @@ To list the tasks from the second page, run
Run: taskList, Run: taskList,
} }
var taskInspectCmd = &cobra.Command{
Use: "inspect --queue=QUEUE --id=TASK_ID",
Short: "Display detailed information on the specified task",
Args: cobra.NoArgs,
Run: taskInspect,
}
var taskCancelCmd = &cobra.Command{ var taskCancelCmd = &cobra.Command{
Use: "cancel TASK_ID [TASK_ID...]", Use: "cancel TASK_ID [TASK_ID...]",
Short: "Cancel one or more active tasks", Short: "Cancel one or more active tasks",
@@ -101,42 +116,42 @@ var taskCancelCmd = &cobra.Command{
} }
var taskArchiveCmd = &cobra.Command{ var taskArchiveCmd = &cobra.Command{
Use: "archive --queue=QUEUE --key=KEY", Use: "archive --queue=QUEUE --id=TASK_ID",
Short: "Archive a task with the given key", Short: "Archive a task with the given id",
Args: cobra.NoArgs, Args: cobra.NoArgs,
Run: taskArchive, Run: taskArchive,
} }
var taskDeleteCmd = &cobra.Command{ var taskDeleteCmd = &cobra.Command{
Use: "delete --queue=QUEUE --key=KEY", Use: "delete --queue=QUEUE --id=TASK_ID",
Short: "Delete a task with the given key", Short: "Delete a task with the given id",
Args: cobra.NoArgs, Args: cobra.NoArgs,
Run: taskDelete, Run: taskDelete,
} }
var taskRunCmd = &cobra.Command{ var taskRunCmd = &cobra.Command{
Use: "run --queue=QUEUE --key=KEY", Use: "run --queue=QUEUE --id=TASK_ID",
Short: "Run a task with the given key", Short: "Run a task with the given id",
Args: cobra.NoArgs, Args: cobra.NoArgs,
Run: taskRun, Run: taskRun,
} }
var taskArchiveAllCmd = &cobra.Command{ var taskArchiveAllCmd = &cobra.Command{
Use: "archive-all --queue=QUEUE --state=STATE", Use: "archiveall --queue=QUEUE --state=STATE",
Short: "Archive all tasks in the given state", Short: "Archive all tasks in the given state",
Args: cobra.NoArgs, Args: cobra.NoArgs,
Run: taskArchiveAll, Run: taskArchiveAll,
} }
var taskDeleteAllCmd = &cobra.Command{ var taskDeleteAllCmd = &cobra.Command{
Use: "delete-all --queue=QUEUE --key=KEY", Use: "deleteall --queue=QUEUE --state=STATE",
Short: "Delete all tasks in the given state", Short: "Delete all tasks in the given state",
Args: cobra.NoArgs, Args: cobra.NoArgs,
Run: taskDeleteAll, Run: taskDeleteAll,
} }
var taskRunAllCmd = &cobra.Command{ var taskRunAllCmd = &cobra.Command{
Use: "run-all --queue=QUEUE --key=KEY", Use: "runall --queue=QUEUE --state=STATE",
Short: "Run all tasks in the given state", Short: "Run all tasks in the given state",
Args: cobra.NoArgs, Args: cobra.NoArgs,
Run: taskRunAll, Run: taskRunAll,
@@ -175,6 +190,8 @@ func taskList(cmd *cobra.Command, args []string) {
listRetryTasks(qname, pageNum, pageSize) listRetryTasks(qname, pageNum, pageSize)
case "archived": case "archived":
listArchivedTasks(qname, pageNum, pageSize) listArchivedTasks(qname, pageNum, pageSize)
case "completed":
listCompletedTasks(qname, pageNum, pageSize)
default: default:
fmt.Printf("error: state=%q is not supported\n", state) fmt.Printf("error: state=%q is not supported\n", state)
os.Exit(1) os.Exit(1)
@@ -183,7 +200,7 @@ func taskList(cmd *cobra.Command, args []string) {
func listActiveTasks(qname string, pageNum, pageSize int) { func listActiveTasks(qname string, pageNum, pageSize int) {
i := createInspector() i := createInspector()
tasks, err := i.ListActiveTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum)) tasks, err := i.ListActiveTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
os.Exit(1) os.Exit(1)
@@ -196,7 +213,7 @@ func listActiveTasks(qname string, pageNum, pageSize int) {
[]string{"ID", "Type", "Payload"}, []string{"ID", "Type", "Payload"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, t.Payload) fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
} }
}, },
) )
@@ -204,7 +221,7 @@ func listActiveTasks(qname string, pageNum, pageSize int) {
func listPendingTasks(qname string, pageNum, pageSize int) { func listPendingTasks(qname string, pageNum, pageSize int) {
i := createInspector() i := createInspector()
tasks, err := i.ListPendingTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum)) tasks, err := i.ListPendingTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
os.Exit(1) os.Exit(1)
@@ -214,10 +231,10 @@ func listPendingTasks(qname string, pageNum, pageSize int) {
return return
} }
printTable( printTable(
[]string{"Key", "Type", "Payload"}, []string{"ID", "Type", "Payload"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload) fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
} }
}, },
) )
@@ -225,7 +242,7 @@ func listPendingTasks(qname string, pageNum, pageSize int) {
func listScheduledTasks(qname string, pageNum, pageSize int) { func listScheduledTasks(qname string, pageNum, pageSize int) {
i := createInspector() i := createInspector()
tasks, err := i.ListScheduledTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum)) tasks, err := i.ListScheduledTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
os.Exit(1) os.Exit(1)
@@ -235,20 +252,29 @@ func listScheduledTasks(qname string, pageNum, pageSize int) {
return return
} }
printTable( printTable(
[]string{"Key", "Type", "Payload", "Process In"}, []string{"ID", "Type", "Payload", "Process In"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
processIn := fmt.Sprintf("%.0f seconds", fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt))
t.NextProcessAt.Sub(time.Now()).Seconds())
fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, processIn)
} }
}, },
) )
} }
// formatProcessAt formats next process at time to human friendly string.
// If processAt time is in the past, returns "right now".
// If processAt time is in the future, returns "in xxx" where xxx is the duration from now.
func formatProcessAt(processAt time.Time) string {
d := processAt.Sub(time.Now())
if d < 0 {
return "right now"
}
return fmt.Sprintf("in %v", d.Round(time.Second))
}
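Illustrative outputs of the helper above (times are relative to now, so the exact duration is approximate):
fmt.Println(formatProcessAt(time.Now().Add(90 * time.Second))) // in 1m30s
fmt.Println(formatProcessAt(time.Now().Add(-time.Minute)))     // right now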
func listRetryTasks(qname string, pageNum, pageSize int) { func listRetryTasks(qname string, pageNum, pageSize int) {
i := createInspector() i := createInspector()
tasks, err := i.ListRetryTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum)) tasks, err := i.ListRetryTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
os.Exit(1) os.Exit(1)
@@ -258,16 +284,11 @@ func listRetryTasks(qname string, pageNum, pageSize int) {
return return
} }
printTable( printTable(
[]string{"Key", "Type", "Payload", "Next Retry", "Last Error", "Retried", "Max Retry"}, []string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Last Failed", "Retried", "Max Retry"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
var nextRetry string fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt),
if d := t.NextProcessAt.Sub(time.Now()); d > 0 { t.LastErr, formatPastTime(t.LastFailedAt), t.Retried, t.MaxRetry)
nextRetry = fmt.Sprintf("in %v", d.Round(time.Second))
} else {
nextRetry = "right now"
}
fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, nextRetry, t.LastError, t.Retried, t.MaxRetry)
} }
}, },
) )
@@ -275,7 +296,7 @@ func listRetryTasks(qname string, pageNum, pageSize int) {
func listArchivedTasks(qname string, pageNum, pageSize int) { func listArchivedTasks(qname string, pageNum, pageSize int) {
i := createInspector() i := createInspector()
tasks, err := i.ListArchivedTasks(qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum)) tasks, err := i.ListArchivedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
os.Exit(1) os.Exit(1)
@@ -285,19 +306,38 @@ func listArchivedTasks(qname string, pageNum, pageSize int) {
return return
} }
printTable( printTable(
[]string{"Key", "Type", "Payload", "Last Failed", "Last Error"}, []string{"ID", "Type", "Payload", "Last Failed", "Last Error"},
func(w io.Writer, tmpl string) { func(w io.Writer, tmpl string) {
for _, t := range tasks { for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.Key(), t.Type, t.Payload, t.LastFailedAt, t.LastError) fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.LastFailedAt), t.LastErr)
}
})
}
func listCompletedTasks(qname string, pageNum, pageSize int) {
i := createInspector()
tasks, err := i.ListCompletedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if len(tasks) == 0 {
fmt.Printf("No completed tasks in %q queue\n", qname)
return
}
printTable(
[]string{"ID", "Type", "Payload", "CompletedAt", "Result"},
func(w io.Writer, tmpl string) {
for _, t := range tasks {
fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.CompletedAt), sprintBytes(t.Result))
} }
}) })
} }
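A hedged sketch of the Inspector calls the completed-task listing wraps; the connection options, queue name, and page values are illustrative.
insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
tasks, err := insp.ListCompletedTasks("default", asynq.PageSize(30), asynq.Page(1))
if err != nil {
	fmt.Println(err)
	os.Exit(1)
}
for _, t := range tasks {
	fmt.Println(t.ID, t.Type, sprintBytes(t.Result))
}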
func taskCancel(cmd *cobra.Command, args []string) { func taskCancel(cmd *cobra.Command, args []string) {
r := createRDB() i := createInspector()
for _, id := range args { for _, id := range args {
err := r.PublishCancelation(id) if err := i.CancelProcessing(id); err != nil {
if err != nil {
fmt.Printf("error: could not send cancelation signal: %v\n", err) fmt.Printf("error: could not send cancelation signal: %v\n", err)
continue continue
} }
@@ -305,20 +345,77 @@ func taskCancel(cmd *cobra.Command, args []string) {
} }
} }
func taskInspect(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
id, err := cmd.Flags().GetString("id")
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
i := createInspector()
info, err := i.GetTaskInfo(qname, id)
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
printTaskInfo(info)
}
func printTaskInfo(info *asynq.TaskInfo) {
bold := color.New(color.Bold)
bold.Println("Task Info")
fmt.Printf("Queue: %s\n", info.Queue)
fmt.Printf("ID: %s\n", info.ID)
fmt.Printf("Type: %s\n", info.Type)
fmt.Printf("State: %v\n", info.State)
fmt.Printf("Retried: %d/%d\n", info.Retried, info.MaxRetry)
fmt.Println()
fmt.Printf("Next process time: %s\n", formatNextProcessAt(info.NextProcessAt))
if len(info.LastErr) != 0 {
fmt.Println()
bold.Println("Last Failure")
fmt.Printf("Failed at: %s\n", formatPastTime(info.LastFailedAt))
fmt.Printf("Error message: %s\n", info.LastErr)
}
}
func formatNextProcessAt(processAt time.Time) string {
if processAt.IsZero() || processAt.Unix() == 0 {
return "n/a"
}
if processAt.Before(time.Now()) {
return "now"
}
return fmt.Sprintf("%s (in %v)", processAt.Format(time.UnixDate), processAt.Sub(time.Now()).Round(time.Second))
}
// formatPastTime takes a time t in the past and returns a user-friendly string.
func formatPastTime(t time.Time) string {
if t.IsZero() || t.Unix() == 0 {
return ""
}
return t.Format(time.UnixDate)
}
func taskArchive(cmd *cobra.Command, args []string) { func taskArchive(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue") qname, err := cmd.Flags().GetString("queue")
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
key, err := cmd.Flags().GetString("key") id, err := cmd.Flags().GetString("id")
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
i := createInspector() i := createInspector()
err = i.ArchiveTaskByKey(qname, key) err = i.ArchiveTask(qname, id)
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
os.Exit(1) os.Exit(1)
@@ -332,14 +429,14 @@ func taskDelete(cmd *cobra.Command, args []string) {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
key, err := cmd.Flags().GetString("key") id, err := cmd.Flags().GetString("id")
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
i := createInspector() i := createInspector()
err = i.DeleteTaskByKey(qname, key) err = i.DeleteTask(qname, id)
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
os.Exit(1) os.Exit(1)
@@ -353,14 +450,14 @@ func taskRun(cmd *cobra.Command, args []string) {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
key, err := cmd.Flags().GetString("key") id, err := cmd.Flags().GetString("id")
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
i := createInspector() i := createInspector()
err = i.RunTaskByKey(qname, key) err = i.RunTask(qname, id)
if err != nil { if err != nil {
fmt.Printf("error: %v\n", err) fmt.Printf("error: %v\n", err)
os.Exit(1) os.Exit(1)
@@ -423,6 +520,8 @@ func taskDeleteAll(cmd *cobra.Command, args []string) {
n, err = i.DeleteAllRetryTasks(qname) n, err = i.DeleteAllRetryTasks(qname)
case "archived": case "archived":
n, err = i.DeleteAllArchivedTasks(qname) n, err = i.DeleteAllArchivedTasks(qname)
case "completed":
n, err = i.DeleteAllCompletedTasks(qname)
default: default:
fmt.Printf("error: unsupported state %q\n", state) fmt.Printf("error: unsupported state %q\n", state)
os.Exit(1) os.Exit(1)


@@ -3,20 +3,13 @@ module github.com/hibiken/asynq/tools
go 1.13 go 1.13
require ( require (
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect
github.com/coreos/go-etcd v2.0.0+incompatible // indirect
github.com/cpuguy83/go-md2man v1.0.10 // indirect
github.com/fatih/color v1.9.0 github.com/fatih/color v1.9.0
github.com/go-redis/redis/v7 v7.4.0 github.com/go-redis/redis/v8 v8.11.4
github.com/google/uuid v1.1.1 github.com/hibiken/asynq v0.21.0
github.com/hibiken/asynq v0.14.0 github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d
github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-homedir v1.1.0
github.com/spf13/cast v1.3.1 github.com/prometheus/client_golang v1.11.0
github.com/spf13/afero v1.1.2 // indirect
github.com/spf13/cobra v1.1.1 github.com/spf13/cobra v1.1.1
github.com/spf13/viper v1.7.0 github.com/spf13/viper v1.7.0
github.com/ugorji/go v1.1.4 // indirect
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 // indirect
) )
replace github.com/hibiken/asynq => ./..


@@ -16,47 +16,60 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4= github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -66,25 +79,41 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
@@ -110,26 +139,34 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hibiken/asynq v0.19.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
github.com/hibiken/asynq v0.21.0 h1:uH9XogJhjq/S39E0/DEPWLZQ6hHJ73UiblZTe4RzHwA=
github.com/hibiken/asynq v0.21.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d h1:Er+U+9PmnyRHRDQjSjRQ24HoWvOY7w9Pk7bUPYM3Ags=
github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d/go.mod h1:VmxwMfMKyb6gyv8xG0oOBMXIhquWKPx+zPtbVBd2Q1s=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -139,6 +176,7 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -151,42 +189,70 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
@@ -198,49 +264,43 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -259,6 +319,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -272,11 +333,15 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -285,6 +350,9 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -292,23 +360,37 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -322,6 +404,7 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
@@ -329,9 +412,13 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -350,17 +437,30 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -371,12 +471,16 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=


@@ -0,0 +1,56 @@
package main
import (
"flag"
"fmt"
"log"
"net/http"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/x/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// Declare command-line flags.
// These variables are bound to flags in init().
var (
flagRedisAddr string
flagRedisDB int
flagRedisPassword string
flagRedisUsername string
flagPort int
)
func init() {
flag.StringVar(&flagRedisAddr, "redis-addr", "127.0.0.1:6379", "host:port of redis server to connect to")
flag.IntVar(&flagRedisDB, "redis-db", 0, "redis DB number to use")
flag.StringVar(&flagRedisPassword, "redis-password", "", "password used to connect to redis server")
flag.StringVar(&flagRedisUsername, "redis-username", "", "username used to connect to redis server")
flag.IntVar(&flagPort, "port", 9876, "port to use for the HTTP server")
}
func main() {
flag.Parse()
// Using NewPedanticRegistry here to test the implementation of Collectors and Metrics.
reg := prometheus.NewPedanticRegistry()
inspector := asynq.NewInspector(asynq.RedisClientOpt{
Addr: flagRedisAddr,
DB: flagRedisDB,
Password: flagRedisPassword,
Username: flagRedisUsername,
})
reg.MustRegister(
metrics.NewQueueMetricsCollector(inspector),
// Add the standard process and go metrics to the registry
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
collectors.NewGoCollector(),
)
http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
log.Printf("exporter server is listening on port: %d\n", flagPort)
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", flagPort), nil))
}
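The exporter above wires the collector into a pedantic registry, which is stricter and useful for validating Collector implementations. A more typical deployment can use client_golang's default registry instead; the following is a minimal, illustrative sketch under that assumption (not part of the committed code), relying on the fact that the default registry already carries the Go and process collectors:

package main

import (
	"log"
	"net/http"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/x/metrics"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})

	// The default registry already includes the Go and process collectors,
	// so only the queue metrics collector needs to be registered here.
	prometheus.MustRegister(metrics.NewQueueMetricsCollector(inspector))

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9876", nil))
}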

10
x/go.mod Normal file

@@ -0,0 +1,10 @@
module github.com/hibiken/asynq/x
go 1.16
require (
github.com/go-redis/redis/v8 v8.11.4
github.com/google/uuid v1.3.0
github.com/hibiken/asynq v0.21.0
github.com/prometheus/client_golang v1.11.0
)

258
x/go.sum Normal file

@@ -0,0 +1,258 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hibiken/asynq v0.21.0 h1:uH9XogJhjq/S39E0/DEPWLZQ6hHJ73UiblZTe4RzHwA=
github.com/hibiken/asynq v0.21.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

190
x/metrics/metrics.go Normal file

@@ -0,0 +1,190 @@
// Package metrics provides implementations of prometheus.Collector to collect Asynq queue metrics.
package metrics
import (
"fmt"
"log"
"github.com/hibiken/asynq"
"github.com/prometheus/client_golang/prometheus"
)
// Namespace used in fully-qualified metrics names.
const namespace = "asynq"
// QueueMetricsCollector gathers queue metrics.
// It implements prometheus.Collector interface.
//
// All metrics exported from this collector have prefix "asynq".
type QueueMetricsCollector struct {
inspector *asynq.Inspector
}
// collectQueueInfo gathers QueueInfo of all queues.
// Since this operation is expensive, it must be called once per collection.
func (qmc *QueueMetricsCollector) collectQueueInfo() ([]*asynq.QueueInfo, error) {
qnames, err := qmc.inspector.Queues()
if err != nil {
return nil, fmt.Errorf("failed to get queue names: %v", err)
}
infos := make([]*asynq.QueueInfo, len(qnames))
for i, qname := range qnames {
qinfo, err := qmc.inspector.GetQueueInfo(qname)
if err != nil {
return nil, fmt.Errorf("failed to get queue info: %v", err)
}
infos[i] = qinfo
}
return infos, nil
}
// Descriptors used by QueueMetricsCollector
var (
tasksQueuedDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "tasks_enqueued_total"),
"Number of tasks enqueued; broken down by queue and state.",
[]string{"queue", "state"}, nil,
)
queueSizeDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "queue_size"),
"Number of tasks in a queue",
[]string{"queue"}, nil,
)
queueLatencyDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "queue_latency_seconds"),
"Number of seconds the oldest pending task is waiting in pending state to be processed.",
[]string{"queue"}, nil,
)
queueMemUsgDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "queue_memory_usage_approx_bytes"),
"Number of memory used by a given queue (approximated number by sampling).",
[]string{"queue"}, nil,
)
tasksProcessedTotalDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "tasks_processed_total"),
"Number of tasks processed (both succeeded and failed); broken down by queue",
[]string{"queue"}, nil,
)
tasksFailedTotalDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "tasks_failed_total"),
"Number of tasks failed; broken down by queue",
[]string{"queue"}, nil,
)
pausedQueues = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "queue_paused_total"),
"Number of queues paused",
[]string{"queue"}, nil,
)
)
func (qmc *QueueMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(qmc, ch)
}
func (qmc *QueueMetricsCollector) Collect(ch chan<- prometheus.Metric) {
queueInfos, err := qmc.collectQueueInfo()
if err != nil {
log.Printf("Failed to collect metrics data: %v", err)
}
for _, info := range queueInfos {
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Active),
info.Queue,
"active",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Pending),
info.Queue,
"pending",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Scheduled),
info.Queue,
"scheduled",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Retry),
info.Queue,
"retry",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Archived),
info.Queue,
"archived",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Completed),
info.Queue,
"completed",
)
ch <- prometheus.MustNewConstMetric(
queueSizeDesc,
prometheus.GaugeValue,
float64(info.Size),
info.Queue,
)
ch <- prometheus.MustNewConstMetric(
queueLatencyDesc,
prometheus.GaugeValue,
info.Latency.Seconds(),
info.Queue,
)
ch <- prometheus.MustNewConstMetric(
queueMemUsgDesc,
prometheus.GaugeValue,
float64(info.MemoryUsage),
info.Queue,
)
ch <- prometheus.MustNewConstMetric(
tasksProcessedTotalDesc,
prometheus.CounterValue,
float64(info.ProcessedTotal),
info.Queue,
)
ch <- prometheus.MustNewConstMetric(
tasksFailedTotalDesc,
prometheus.CounterValue,
float64(info.FailedTotal),
info.Queue,
)
pausedValue := 0 // zero to indicate "not paused"
if info.Paused {
pausedValue = 1
}
ch <- prometheus.MustNewConstMetric(
pausedQueues,
prometheus.GaugeValue,
float64(pausedValue),
info.Queue,
)
}
}
// NewQueueMetricsCollector returns a collector that exports metrics about Asynq queues.
func NewQueueMetricsCollector(inspector *asynq.Inspector) *QueueMetricsCollector {
return &QueueMetricsCollector{inspector: inspector}
}

40
x/rate/example_test.go Normal file

@@ -0,0 +1,40 @@
package rate_test
import (
"context"
"fmt"
"time"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/x/rate"
)
type RateLimitError struct {
RetryIn time.Duration
}
func (e *RateLimitError) Error() string {
return fmt.Sprintf("rate limited (retry in %v)", e.RetryIn)
}
func ExampleNewSemaphore() {
redisConnOpt := asynq.RedisClientOpt{Addr: ":6379"}
sema := rate.NewSemaphore(redisConnOpt, "my_queue", 10)
// call sema.Close() when appropriate
_ = asynq.HandlerFunc(func(ctx context.Context, task *asynq.Task) error {
ok, err := sema.Acquire(ctx)
if err != nil {
return err
}
if !ok {
return &RateLimitError{RetryIn: 30 * time.Second}
}
// Make sure to release the token once we're done.
defer sema.Release(ctx)
// Process task
return nil
})
}
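The example returns a RateLimitError but leaves the server-side handling implicit. Below is a minimal sketch of how a server could honor the RetryIn hint, assuming asynq's Config.IsFailure and Config.RetryDelayFunc options and reusing the RateLimitError type defined above (add "errors" to the imports); it is illustrative only and not part of the committed code:

// retryDelay honors the RetryIn hint carried by RateLimitError and falls
// back to asynq's default backoff for every other error.
func retryDelay(n int, err error, task *asynq.Task) time.Duration {
	var rateLimitErr *RateLimitError
	if errors.As(err, &rateLimitErr) {
		return rateLimitErr.RetryIn
	}
	return asynq.DefaultRetryDelayFunc(n, err, task)
}

func isRateLimitError(err error) bool {
	var rateLimitErr *RateLimitError
	return errors.As(err, &rateLimitErr)
}

func newServer() *asynq.Server {
	return asynq.NewServer(asynq.RedisClientOpt{Addr: ":6379"}, asynq.Config{
		Concurrency: 10,
		// Do not count rate-limited runs as failures in queue stats.
		IsFailure:      func(err error) bool { return !isRateLimitError(err) },
		RetryDelayFunc: retryDelay,
	})
}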

114
x/rate/semaphore.go Normal file

@@ -0,0 +1,114 @@
// Package rate contains rate limiting strategies for asynq.Handler(s).
package rate
import (
"context"
"fmt"
"strings"
"time"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq"
asynqcontext "github.com/hibiken/asynq/internal/context"
)
// NewSemaphore creates a counting Semaphore for the given scope with the given number of tokens.
func NewSemaphore(rco asynq.RedisConnOpt, scope string, maxTokens int) *Semaphore {
rc, ok := rco.MakeRedisClient().(redis.UniversalClient)
if !ok {
panic(fmt.Sprintf("rate.NewSemaphore: unsupported RedisConnOpt type %T", rco))
}
if maxTokens < 1 {
panic("rate.NewSemaphore: maxTokens cannot be less than 1")
}
if len(strings.TrimSpace(scope)) == 0 {
panic("rate.NewSemaphore: scope should not be empty")
}
return &Semaphore{
rc: rc,
scope: scope,
maxTokens: maxTokens,
}
}
// Semaphore is a distributed counting semaphore that can be used to limit concurrent processing to at most maxTokens tasks across multiple asynq servers.
type Semaphore struct {
rc redis.UniversalClient
maxTokens int
scope string
}
// KEYS[1] -> asynq:sema:<scope>
// ARGV[1] -> max concurrency
// ARGV[2] -> current time in unix time
// ARGV[3] -> deadline in unix time
// ARGV[4] -> task ID
var acquireCmd = redis.NewScript(`
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", tonumber(ARGV[2])-1)
local count = redis.call("ZCARD", KEYS[1])
if (count < tonumber(ARGV[1])) then
redis.call("ZADD", KEYS[1], ARGV[3], ARGV[4])
return 'true'
else
return 'false'
end
`)
// Acquire attempts to acquire a token from the semaphore.
// - Returns (true, nil) if a token was acquired, i.e. the number of tokens currently held is less than maxTokens.
// - Returns (false, nil) when a token cannot be acquired because maxTokens tokens are already held.
// - Returns (false, error) otherwise.
//
// The context.Context passed to Acquire must have a deadline set;
// the deadline is used to expire the token so that it is released even if the task goroutine crashes and never calls Release.
func (s *Semaphore) Acquire(ctx context.Context) (bool, error) {
d, ok := ctx.Deadline()
if !ok {
return false, fmt.Errorf("provided context must have a deadline")
}
taskID, ok := asynqcontext.GetTaskID(ctx)
if !ok {
return false, fmt.Errorf("provided context is missing task ID value")
}
return acquireCmd.Run(ctx, s.rc,
[]string{semaphoreKey(s.scope)},
s.maxTokens,
time.Now().Unix(),
d.Unix(),
taskID,
).Bool()
}
// Release releases the token held by the task in ctx back to the counting semaphore.
func (s *Semaphore) Release(ctx context.Context) error {
taskID, ok := asynqcontext.GetTaskID(ctx)
if !ok {
return fmt.Errorf("provided context is missing task ID value")
}
n, err := s.rc.ZRem(ctx, semaphoreKey(s.scope), taskID).Result()
if err != nil {
return fmt.Errorf("redis command failed: %v", err)
}
if n == 0 {
return fmt.Errorf("no token found for task %q", taskID)
}
return nil
}
// Close closes the connection to redis.
func (s *Semaphore) Close() error {
return s.rc.Close()
}
func semaphoreKey(scope string) string {
return fmt.Sprintf("asynq:sema:%s", scope)
}
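One property worth noting: Acquire stores the context deadline as the member's score, and the acquire script removes members whose score has already passed, so a token that is never released expires once its deadline is reached. The task's Timeout (or Deadline) option therefore also bounds the token's lifetime. A minimal enqueue sketch under that assumption (the "image:resize" task type is made up; illustrative only, not part of the committed code):

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: ":6379"})
	defer client.Close()

	// The 30s timeout becomes the handler's context deadline; Acquire writes
	// that deadline as the token's score, so an unreleased token is swept
	// shortly after the deadline passes.
	info, err := client.Enqueue(asynq.NewTask("image:resize", nil), asynq.Timeout(30*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("enqueued task %s", info.ID)
}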

408
x/rate/semaphore_test.go Normal file

@@ -0,0 +1,408 @@
package rate
import (
"context"
"flag"
"fmt"
"strings"
"testing"
"time"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/internal/base"
asynqcontext "github.com/hibiken/asynq/internal/context"
)
var (
redisAddr string
redisDB int
useRedisCluster bool
redisClusterAddrs string // comma-separated list of host:port
)
func init() {
flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing")
flag.IntVar(&redisDB, "redis_db", 14, "redis db number to use in testing")
flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing")
flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses")
}
func TestNewSemaphore(t *testing.T) {
tests := []struct {
desc string
name string
maxConcurrency int
wantPanic string
connOpt asynq.RedisConnOpt
}{
{
desc: "Bad RedisConnOpt",
wantPanic: "rate.NewSemaphore: unsupported RedisConnOpt type *rate.badConnOpt",
connOpt: &badConnOpt{},
},
{
desc: "Zero maxTokens should panic",
wantPanic: "rate.NewSemaphore: maxTokens cannot be less than 1",
},
{
desc: "Empty scope should panic",
maxConcurrency: 2,
name: " ",
wantPanic: "rate.NewSemaphore: scope should not be empty",
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
if tt.wantPanic != "" {
defer func() {
if r := recover(); r.(string) != tt.wantPanic {
t.Errorf("%s;\nNewSemaphore should panic with msg: %s, got %s", tt.desc, tt.wantPanic, r.(string))
}
}()
}
opt := tt.connOpt
if tt.connOpt == nil {
opt = getRedisConnOpt(t)
}
sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
defer sema.Close()
})
}
}
func TestNewSemaphore_Acquire(t *testing.T) {
tests := []struct {
desc string
name string
maxConcurrency int
taskIDs []string
ctxFunc func(string) (context.Context, context.CancelFunc)
want []bool
}{
{
desc: "Should acquire token when current token count is less than maxTokens",
name: "task-1",
maxConcurrency: 3,
taskIDs: []string{uuid.NewString(), uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: id,
Queue: "task-1",
}, time.Now().Add(time.Second))
},
want: []bool{true, true},
},
{
desc: "Should fail acquiring token when current token count is equal to maxTokens",
name: "task-2",
maxConcurrency: 3,
taskIDs: []string{uuid.NewString(), uuid.NewString(), uuid.NewString(), uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: id,
Queue: "task-2",
}, time.Now().Add(time.Second))
},
want: []bool{true, true, true, false},
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
}
sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
defer sema.Close()
for i := 0; i < len(tt.taskIDs); i++ {
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
got, err := sema.Acquire(ctx)
if err != nil {
t.Errorf("%s;\nSemaphore.Acquire() got error %v", tt.desc, err)
}
if got != tt.want[i] {
t.Errorf("%s;\nSemaphore.Acquire(ctx) returned %v, want %v", tt.desc, got, tt.want[i])
}
cancel()
}
})
}
}
func TestNewSemaphore_Acquire_Error(t *testing.T) {
tests := []struct {
desc string
name string
maxConcurrency int
taskIDs []string
ctxFunc func(string) (context.Context, context.CancelFunc)
errStr string
}{
{
desc: "Should return error if context has no deadline",
name: "task-3",
maxConcurrency: 1,
taskIDs: []string{uuid.NewString(), uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return context.Background(), func() {}
},
errStr: "provided context must have a deadline",
},
{
desc: "Should return error when context is missing taskID",
name: "task-4",
maxConcurrency: 1,
taskIDs: []string{uuid.NewString()},
ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), time.Second)
},
errStr: "provided context is missing task ID value",
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
}
sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
defer sema.Close()
for i := 0; i < len(tt.taskIDs); i++ {
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
_, err := sema.Acquire(ctx)
if err == nil || err.Error() != tt.errStr {
t.Errorf("%s;\nSemaphore.Acquire() got error %v want error %v", tt.desc, err, tt.errStr)
}
cancel()
}
})
}
}
func TestNewSemaphore_Acquire_StaleToken(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
taskID := uuid.NewString()
// Add a sorted-set member to mimic the case where a token was acquired but the goroutine crashed;
// in that case the token is never explicitly removed and should already be present.
rc.ZAdd(context.Background(), semaphoreKey("stale-token"), &redis.Z{
Score: float64(time.Now().Add(-10 * time.Second).Unix()),
Member: taskID,
})
sema := NewSemaphore(opt, "stale-token", 1)
defer sema.Close()
ctx, cancel := asynqcontext.New(&base.TaskMessage{
ID: taskID,
Queue: "task-1",
}, time.Now().Add(time.Second))
defer cancel()
got, err := sema.Acquire(ctx)
if err != nil {
t.Errorf("Acquire_StaleToken;\nSemaphore.Acquire() got error %v", err)
}
if !got {
t.Error("Acquire_StaleToken;\nSemaphore.Acquire() got false want true")
}
}
func TestNewSemaphore_Release(t *testing.T) {
tests := []struct {
desc string
name string
taskIDs []string
ctxFunc func(string) (context.Context, context.CancelFunc)
wantCount int64
}{
{
desc: "Should decrease token count",
name: "task-5",
taskIDs: []string{uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: id,
Queue: "task-3",
}, time.Now().Add(time.Second))
},
},
{
desc: "Should decrease token count by 2",
name: "task-6",
taskIDs: []string{uuid.NewString(), uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: id,
Queue: "task-4",
}, time.Now().Add(time.Second))
},
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
}
var members []*redis.Z
for i := 0; i < len(tt.taskIDs); i++ {
members = append(members, &redis.Z{
Score: float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
Member: tt.taskIDs[i],
})
}
if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.ZAdd() got error %v", tt.desc, err)
}
sema := NewSemaphore(opt, tt.name, 3)
defer sema.Close()
for i := 0; i < len(tt.taskIDs); i++ {
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
if err := sema.Release(ctx); err != nil {
t.Errorf("%s;\nSemaphore.Release() got error %v", tt.desc, err)
}
cancel()
}
i, err := rc.ZCount(context.Background(), semaphoreKey(tt.name), "-inf", "+inf").Result()
if err != nil {
t.Errorf("%s;\nredis.UniversalClient.ZCount() got error %v", tt.desc, err)
}
if i != tt.wantCount {
t.Errorf("%s;\nSemaphore.Release(ctx) didn't release token, got %v want 0", tt.desc, i)
}
})
}
}
func TestNewSemaphore_Release_Error(t *testing.T) {
testID := uuid.NewString()
tests := []struct {
desc string
name string
taskIDs []string
ctxFunc func(string) (context.Context, context.CancelFunc)
errStr string
}{
{
desc: "Should return error when context is missing taskID",
name: "task-7",
taskIDs: []string{uuid.NewString()},
ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), time.Second)
},
errStr: "provided context is missing task ID value",
},
{
desc: "Should return error when context has taskID which never acquired token",
name: "task-8",
taskIDs: []string{uuid.NewString()},
ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: testID,
Queue: "task-4",
}, time.Now().Add(time.Second))
},
errStr: fmt.Sprintf("no token found for task %q", testID),
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
}
var members []*redis.Z
for i := 0; i < len(tt.taskIDs); i++ {
members = append(members, &redis.Z{
Score: float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
Member: tt.taskIDs[i],
})
}
if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.ZAdd() got error %v", tt.desc, err)
}
sema := NewSemaphore(opt, tt.name, 3)
defer sema.Close()
for i := 0; i < len(tt.taskIDs); i++ {
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
if err := sema.Release(ctx); err == nil || err.Error() != tt.errStr {
t.Errorf("%s;\nSemaphore.Release() got error %v want error %v", tt.desc, err, tt.errStr)
}
cancel()
}
})
}
}
func getRedisConnOpt(tb testing.TB) asynq.RedisConnOpt {
tb.Helper()
if useRedisCluster {
addrs := strings.Split(redisClusterAddrs, ",")
if len(addrs) == 0 {
tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
}
return asynq.RedisClusterClientOpt{
Addrs: addrs,
}
}
return asynq.RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
}
}
type badConnOpt struct {
}
func (b badConnOpt) MakeRedisClient() interface{} {
return nil
}