mirror of https://github.com/hibiken/asynq.git synced 2025-10-21 21:46:12 +08:00

Compare commits


145 Commits

Author SHA1 Message Date
Ken Hibino
245d4fe663 v0.23.0 2022-04-11 16:57:33 -07:00
Ken Hibino
94719e325c Update CHANGELOG.md 2022-04-11 16:55:43 -07:00
Ken Hibino
8b2a787759 Rename variables for public API documentation 2022-04-11 16:55:43 -07:00
Ken Hibino
451be7e50f (cli): Update stats command to print the number of aggregating tasks 2022-04-11 16:55:43 -07:00
Ken Hibino
578321f226 (cli): Extend task ls command to list aggregating tasks 2022-04-11 16:55:43 -07:00
Ken Hibino
2c783566f3 (cli): Add group ls command 2022-04-11 16:55:43 -07:00
Ken Hibino
39718f8bea Always enqueue the aggregated task in the same queue 2022-04-11 16:55:43 -07:00
Ken Hibino
829f64fd38 Define GroupAggregator interface 2022-04-11 16:55:43 -07:00
Ken Hibino
a369443955 Add batch actions to inspector for aggregating tasks
Added:
- Inspector.DeleteAllAggregatingTasks
- Inspector.ArchiveAllAggregatingTasks
- Inspector.RunAllAggregatingTasks
2022-04-11 16:55:43 -07:00
Ken Hibino
de139cc18e Update RDB.RunTask to schedule aggregating task 2022-04-11 16:55:43 -07:00
Ken Hibino
74db013ab9 Add RDB.RunAllAggregatingTasks 2022-04-11 16:55:43 -07:00
Ken Hibino
725105ca03 Update RDB.ArchiveTask to archive aggregating task 2022-04-11 16:55:43 -07:00
Ken Hibino
d8f31e45f1 Add RDB.ArchiveAllAggregatingTasks 2022-04-11 16:55:43 -07:00
Ken Hibino
9023cbf4be Update RDB.DeleteTask to handle aggregating task 2022-04-11 16:55:43 -07:00
Ken Hibino
9279c09125 Add RDB.DeleteAllAggregatingTasks 2022-04-11 16:55:43 -07:00
Ken Hibino
bc27126670 Fix memory usage lua script 2022-04-11 16:55:43 -07:00
Ken Hibino
0cfa7f47ba Fix memory_usage lua script 2022-04-11 16:55:43 -07:00
Ken Hibino
8a4fb71dd5 Update go.mod with replace directive 2022-04-11 16:55:43 -07:00
Ken Hibino
7fb5b25944 Add Inspector.ListAggregatingTasks 2022-04-11 16:55:43 -07:00
Ken Hibino
71bd8f0535 Add RDB.ListAggregating 2022-04-11 16:55:43 -07:00
Ken Hibino
4c8432e0ce Add Inspector.Groups method 2022-04-11 16:55:43 -07:00
Ken Hibino
e939b5d166 Rename asynqtest package to testutil 2022-04-11 16:55:43 -07:00
Ken Hibino
1acd62c760 Move test helpers to asynqtest package 2022-04-11 16:55:43 -07:00
Ken Hibino
0149396bae Add RDB.GroupStats for inspecting groups 2022-04-11 16:55:43 -07:00
Ken Hibino
45ed560708 Add Group field to TaskInfo struct 2022-04-11 16:55:43 -07:00
Ken Hibino
01eeb8756e (cli): Update queue inspect cmd to show # of groups and aggregating tasks 2022-04-11 16:55:43 -07:00
Ken Hibino
47af17cfb4 Fix RDB.CurrentStats to report the correct queue size 2022-04-11 16:55:43 -07:00
Ken Hibino
eb064c2bab Fix AggregationCheck with unlimited size to clear group name from
all-groups set
2022-04-11 16:55:43 -07:00
Ken Hibino
652939dd3a Update memory usage redis lua script to account for groups 2022-04-11 16:55:43 -07:00
Ken Hibino
efe3c74037 Show number of groups and aggregating task count in QueueInfo 2022-04-11 16:55:43 -07:00
Ken Hibino
74d2eea4e0 Clear group if aggregation set empties the group 2022-04-11 16:55:43 -07:00
Ken Hibino
60a4dc1401 Add test for DeleteAggregationSet error case 2022-04-11 16:55:43 -07:00
Ken Hibino
4b716780ef Rewrite test for DeleteAggregationSet function with a new pattern 2022-04-11 16:55:43 -07:00
Ken Hibino
e63f41fb24 Fix DeleteAggregationSet 2022-04-11 16:55:43 -07:00
Ken Hibino
1c388baf06 Implement RDB.ReclaimStaleAggregationSets 2022-04-11 16:55:43 -07:00
Ken Hibino
47a66231b3 Store aggregation set *key* in all aggregation sets zset 2022-04-11 16:55:43 -07:00
Ken Hibino
3551d3334c Use zset for aggregation set to preserve score 2022-04-11 16:55:43 -07:00
Ken Hibino
8b16ede8bc Declare ReclaimStaleAggregationSets 2022-04-11 16:55:43 -07:00
Ken Hibino
c8658a53e6 Add aggregator test 2022-04-11 16:55:43 -07:00
Ken Hibino
562506c7ba Fix client to return error when nil task is passed 2022-04-11 16:55:43 -07:00
Ken Hibino
888b5590fb Make GroupMaxSize and GroupMaxDelay config optional 2022-04-11 16:55:43 -07:00
Ken Hibino
196db64d4d Run aggregator on the server 2022-04-11 16:55:43 -07:00
Ken Hibino
4b35eb0e1a Fix RDB.AggregationCheck when run against an empty group 2022-04-11 16:55:43 -07:00
Ken Hibino
b29fe58434 Implement RDB.ListGroups 2022-04-11 16:55:43 -07:00
Ken Hibino
7849b1114c Implement RDB.DeleteAggregationSet 2022-04-11 16:55:43 -07:00
Ken Hibino
99c00bffeb Implement RDB.AggregationCheck 2022-04-11 16:55:43 -07:00
Ken Hibino
4542b52da8 Check for aggregation at an interval <= gracePeriod 2022-04-11 16:55:43 -07:00
Ken Hibino
d841dc2f8d Add initial implementation of aggregator 2022-04-11 16:55:43 -07:00
Ken Hibino
ab28234767 Update client dependency to base.Broker 2022-04-11 16:55:43 -07:00
Ken Hibino
eb27b0fe1e Add TaskMessageBuilder type as a test helper 2022-04-11 16:55:43 -07:00
Ken Hibino
088be63ee4 Update forwarder to use time.Timer 2022-04-11 16:55:43 -07:00
Ken Hibino
ed69667e86 Update ForwardIfReady test with group 2022-04-11 16:55:43 -07:00
Ken Hibino
4e8885276c Update client to store groupKey under TaskMessage 2022-04-11 16:55:43 -07:00
Ken Hibino
401f7fb4fe Add GroupKey field to TaskMessage 2022-04-11 16:55:43 -07:00
Ken Hibino
61854ea1dc Update RDB.ForwardIfReady to forward to group if groupKey is specified 2022-04-11 16:55:43 -07:00
Ken Hibino
f17c157b0f Update Client to add task to group if Group option is specified 2022-04-11 16:55:43 -07:00
Ken Hibino
8b582899ad Add RDB.AddToGroup and RDB.AddToGroupUnique methods 2022-04-11 16:55:43 -07:00
Ken Hibino
e3d2939a4c Add helper functions to generate group key 2022-04-11 16:55:43 -07:00
Ken Hibino
2ce71e83b0 Add Group task option 2022-04-11 16:55:43 -07:00
Ken Hibino
1608366032 Add group related configuration options 2022-04-11 16:55:43 -07:00
ashang
3f4f0c1daa Use explicit types for limit constants 2022-03-29 06:30:10 -07:00
Ken Hibino
f94a65dc9f Add go1.18 to build workflow matrix 2022-03-22 06:51:28 -07:00
Erwan Leboucher
04d7c8c38c Add rediss url parsing support 2022-02-24 08:30:55 -08:00
Ken Hibino
c04fd41653 v0.22.1 2022-02-20 06:22:55 -08:00
Ken Hibino
7e5efb0e30 Drop GT option from RDB.ExtendLease
GT option in ZAdd is supported for redis v6.2.0 or above.
This change fixes redis version compatibility (currently v4.0+)
2022-02-20 06:20:38 -08:00
Ken Hibino
cabf8d3627 Fix changelog 2022-02-19 06:21:56 -08:00
Ken Hibino
a19909f5f4 v0.22.0 2022-02-19 06:20:05 -08:00
Ken Hibino
cea5110d15 Add IsOrphaned field to TaskInfo 2022-02-19 06:15:44 -08:00
Ken Hibino
9b63e23274 Update log messages 2022-02-19 06:15:44 -08:00
Ken Hibino
de25201d9f Make timeutil.SimulatedClock concurrency safe 2022-02-19 06:15:44 -08:00
Ken Hibino
ec560afb01 Fix processor test 2022-02-19 06:15:44 -08:00
Ken Hibino
d4006894ad Remove base.DeadlinesKey 2022-02-19 06:15:44 -08:00
Ken Hibino
59927509d8 Remove timeout and deadline fields under task key 2022-02-19 06:15:44 -08:00
Ken Hibino
8211167de2 Update processor to create a lease and watch for expiration 2022-02-19 06:15:44 -08:00
Ken Hibino
d7169cd445 Update heartbeat to extend lease of active workers 2022-02-19 06:15:44 -08:00
Ken Hibino
dfae8638e1 Update RDB methods to work with lease 2022-02-19 06:15:44 -08:00
Ken Hibino
b9943de2ab Add Lease type to base package 2022-02-19 06:15:44 -08:00
Ken Hibino
871474f220 Update heartbeat goroutine to call ExtendLease on active tasks 2022-02-19 06:15:44 -08:00
Ken Hibino
87dc392c7f Add RDB.ExtendLease method 2022-02-19 06:15:44 -08:00
Ken Hibino
dabcb120d5 Update recoverer to use ListLeaseExpired 2022-02-19 06:15:44 -08:00
Ken Hibino
bc2f1986d7 Update ListDeadlineExceeded to ListLeaseExpired 2022-02-19 06:15:44 -08:00
Ken Hibino
b8cb579407 Update RDB methods to use lease instead of deadlines set 2022-02-19 06:15:44 -08:00
Ken Hibino
bca624792c Move task deadline compute logic to processor 2022-02-19 06:15:44 -08:00
Ken Hibino
d865d89900 Update RDB.Dequeue to insert task ID to lease set 2022-02-19 06:15:44 -08:00
Ken Hibino
852af7abd1 Add base.LeaseKey helper function 2022-02-19 06:15:44 -08:00
Ken Hibino
5490d2c625 Fix tests 2022-02-16 07:08:01 -08:00
Binaek Sarkar
ebd7a32c0f conventions 2022-02-16 06:43:08 -08:00
Binaek Sarkar
55d0610a03 test and changelog 2022-02-16 06:43:08 -08:00
Binaek Sarkar
ab8a4f5b1e review corrections 2022-02-16 06:43:08 -08:00
Binaek Sarkar
d7ceb0c090 first cut 2022-02-16 06:43:08 -08:00
Ken Hibino
8bd70c6f84 (ci): Run go (build|test) commands for each module 2022-02-01 07:00:00 -08:00
Ken Hibino
10ab4e3745 Remove replace directives in go.mod 2022-02-01 06:18:41 -08:00
Ken Hibino
349f4c50fb Add example for ResultWriter 2022-01-31 09:08:41 -08:00
Ken Hibino
dff2e3a336 v0.21.0 2022-01-22 06:15:29 -08:00
Ken Hibino
65040af7b5 Update changelog 2022-01-22 06:14:24 -08:00
Ken Hibino
053fe2d1ee Create PeriodicTaskManager 2022-01-22 05:59:33 -08:00
Ken Hibino
25832e5e95 Fix bug related to concurrently executing server state changes 2022-01-12 09:10:56 -08:00
Ken Hibino
aa26f3819e Fix flaky tests 2022-01-05 09:07:42 -08:00
Ken Hibino
d94614bb9b Add CODE_OF_CONDUCT.md 2022-01-04 06:17:48 -08:00
Mahdi Dibaiee
ce46b07652 Allow configuration of DelayedTaskCheckInterval 2022-01-03 14:44:00 -08:00
Mahdi Dibaiee
2d0170541c Add --json flag for asynq stats command 2022-01-02 07:24:29 -08:00
Andreas Thomas
c1f08106da fix: missing import statement in example code 2021-12-27 05:40:10 -08:00
Ken Hibino
74cf804197 Update readme 2021-12-20 05:51:51 -08:00
Ken Hibino
8dfabfccb3 Fix build 2021-12-19 07:06:37 -08:00
Ken Hibino
5f20edcbd1 v0.20.0 2021-12-19 07:00:21 -08:00
Ken Hibino
1ddb2f7bce Use math.MaxInt64 instead of custom const 2021-12-19 06:58:12 -08:00
Ken Hibino
82d18e3d91 Record total tasks processed/failed 2021-12-16 16:53:02 -08:00
Ken Hibino
43cb4ddf19 Add queue metrics exporter
Changes:
- Added `x/metrics` package
- Added `tools/metrics_exporter` binary
2021-12-16 06:01:01 -08:00
Francisco Miamoto
ddfc6747a1 Fix typo in Server doc 2021-12-13 16:23:30 -08:00
Ken Hibino
970cb7a606 v0.19.1 2021-12-12 06:16:13 -08:00
Ken Hibino
157e97e72e Update changelog 2021-12-11 10:29:43 -08:00
Ken Hibino
22e6c9d297 Delete "pending_since" under task-key when state changes to active 2021-12-11 10:29:43 -08:00
Ken Hibino
99a6750656 Add Latency field to QueueInfo 2021-12-11 10:29:43 -08:00
Ken Hibino
e7c1c3ad6f Use clock in RDB 2021-12-11 10:29:43 -08:00
Ken Hibino
c9183374c5 Add internal timeutil package 2021-12-11 10:29:43 -08:00
Ken Hibino
6e7106c8f2 Record time when task moved to pending state 2021-12-11 10:29:43 -08:00
Ken Hibino
9f2c321e98 Add EnqueueContext method to Client 2021-11-15 16:34:26 -08:00
Ken Hibino
e2b61c9056 Return error if Unique TTL is less than 1s 2021-11-09 16:37:02 -08:00
Ken Hibino
531d1ef089 Fix godoc around errors returned from Inspector 2021-11-09 15:45:20 -08:00
Ken Hibino
413afc2ab6 v0.19.0 2021-11-06 15:20:09 -07:00
Ken Hibino
6bb4818509 Update readme 2021-11-06 15:18:42 -07:00
Ken Hibino
f4ddac4dcc Introduce Task Results
* Added Retention Option to specify retention TTL for tasks
* Added ResultWriter as a client interface to write result data for the associated task
2021-11-06 15:18:42 -07:00
Ken Hibino
4638405cbd Fix flaky test 2021-11-06 15:18:42 -07:00
Ken Hibino
9e2f88c00d Add TaskID option to allow user to specify task id 2021-11-06 15:18:42 -07:00
Ken Hibino
dbdd9c6d5f Update RDB Enqueue and Schedule methods to check for task ID conflict 2021-11-06 15:18:42 -07:00
Ken Hibino
2261c7c9a0 Change TaskMessage.ID type from uuid.UUID to string 2021-11-06 15:18:42 -07:00
Ken Hibino
83cae4bb24 Update NewTask function to take Option as varargs 2021-11-06 15:18:42 -07:00
Ajat Prabha
23c522dc9f Add asynq/x/rate package
- Added a directory /x for external, experimental packages
- Added a `rate` package to enable rate limiting across multiple asynq worker servers
2021-11-03 15:55:23 -07:00
Ken Hibino
0d2c0f612b Add FUNDING.yml 2021-10-03 09:25:35 -07:00
Ken Hibino
d612a8a9e4 v0.18.6 2021-10-03 05:55:49 -07:00
Jason White
b3ef9e91a9 Upgrade go-redis/redis to version 8 2021-09-02 05:56:02 -07:00
Ken Hibino
05534c6f24 v0.18.5 2021-09-01 06:02:49 -07:00
Ken Hibino
f0db219f6a Add IsFailure to Config
With this IsFailure config, users can provide a predicate function to 
determine whether the error returned from Handler counts as a failure.
2021-09-01 06:00:54 -07:00
Mehran Poursadeghi
3ae0e7f528 Fix readme 2021-08-27 14:47:03 -07:00
Ken Hibino
421dc584ff v0.18.4 2021-08-17 17:12:33 -07:00
Ken Hibino
cfd1a1dfe8 Make scheduler methods thread-safe 2021-08-17 17:10:53 -07:00
Ken Hibino
c197902dc0 v0.18.3 2021-08-09 08:59:35 -07:00
Ken Hibino
e6355bf3f5 Use approximate memory usage for QueueInfo 2021-08-09 08:58:44 -07:00
Luqqk
95c90a5cb8 Add changelog entry, add additional test case 2021-08-02 20:20:09 -07:00
Luqqk
6817af366a Adjust error message, use TrimSpace for more robust empty typename check 2021-08-02 20:20:09 -07:00
Luqqk
4bce28d677 client.Enqueue - prevent empty task's typename 2021-08-02 20:20:09 -07:00
Pedro Henrique
73f930313c Fixes links 2021-07-29 17:15:27 -07:00
Ken Hibino
bff2a05d59 Fix examples in readme 2021-07-18 09:28:43 -07:00
Ken Hibino
684a7e0c98 v0.18.2 2021-07-15 06:56:53 -07:00
Ken Hibino
46b23d6495 Allow upper case characters in queue name 2021-07-15 06:55:47 -07:00
72 changed files with 12085 additions and 2526 deletions

.github/FUNDING.yml (new file)

@@ -0,0 +1,12 @@
# These are supported funding model platforms
github: [hibiken] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

.github/workflows/build.yml

@@ -7,7 +7,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        go-version: [1.13.x, 1.14.x, 1.15.x, 1.16.x]
+        go-version: [1.14.x, 1.15.x, 1.16.x, 1.17.x, 1.18.x]
     runs-on: ${{ matrix.os }}
     services:
       redis:
@@ -22,12 +22,21 @@ jobs:
         with:
           go-version: ${{ matrix.go-version }}
-      - name: Build
+      - name: Build core module
         run: go build -v ./...
-      - name: Test
+      - name: Build x module
+        run: cd x && go build -v ./... && cd ..
+      - name: Build tools module
+        run: cd tools && go build -v ./... && cd ..
+      - name: Test core module
         run: go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
+      - name: Test x module
+        run: cd x && go test -race -v ./... && cd ..
       - name: Benchmark Test
         run: go test -run=^$ -bench=. -loglevel=debug ./...

.gitignore

@@ -1,3 +1,4 @@
+vendor
 # Binaries for programs and plugins
 *.exe
 *.exe~
@@ -14,11 +15,13 @@
 # Ignore examples for now
 /examples
-# Ignore command binary
+# Ignore tool binaries
 /tools/asynq/asynq
+/tools/metrics_exporter/metrics_exporter
 # Ignore asynq config file
 .asynq.*
 # Ignore editor config files
 .vscode
+.idea

CHANGELOG.md

@@ -7,7 +7,122 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]

+## [0.23.0] - 2022-04-11
+### Added
+- `Group` option is introduced to enqueue a task in a group.
+- `GroupAggregator` and related types are introduced for the task aggregation feature.
+- `GroupGracePeriod`, `GroupMaxSize`, `GroupMaxDelay`, and `GroupAggregator` fields are added to `Config`.
+- `Inspector` has new methods related to "aggregating tasks".
+- `Group` field is added to `TaskInfo`.
+- (CLI): `group ls` command is added.
+- (CLI): `task ls` supports listing aggregating tasks via `--state=aggregating --group=<GROUP>` flags.
+- Enable `rediss` url parsing support.
+### Fixed
+- Fixed overflow issue with 32-bit systems (for details, see https://github.com/hibiken/asynq/pull/426).
+
+## [0.22.1] - 2022-02-20
+### Fixed
+- Fixed Redis version compatibility: keep support for redis v4.0+.
+
+## [0.22.0] - 2022-02-19
+### Added
+- `BaseContext` is introduced in `Config` to specify a callback hook that provides a base `context` from which the `Handler` `context` is derived.
+- `IsOrphaned` field is added to `TaskInfo` to describe a task left in active state with no worker processing it.
+### Changed
+- `Server` now recovers tasks with an expired lease. Recovered tasks are retried/archived with the `ErrLeaseExpired` error.
+
+## [0.21.0] - 2022-01-22
+### Added
+- `PeriodicTaskManager` is added. Prefer using this over `Scheduler` as it has better support for dynamic periodic tasks.
+- The `asynq stats` command now supports a `--json` option, making its output a JSON object.
+- Introduced new configuration for `DelayedTaskCheckInterval`. See [godoc](https://godoc.org/github.com/hibiken/asynq) for more details.
+
+## [0.20.0] - 2021-12-19
+### Added
+- Package `x/metrics` is added.
+- Tool `tools/metrics_exporter` binary is added.
+- `ProcessedTotal` and `FailedTotal` fields were added to the `QueueInfo` struct.
+
+## [0.19.1] - 2021-12-12
+### Added
+- `Latency` field is added to `QueueInfo`.
+- `EnqueueContext` method is added to `Client`.
+### Fixed
+- Fixed an error when the user passes a duration less than 1s to the `Unique` option.
+
+## [0.19.0] - 2021-11-06
+### Changed
+- `NewTask` takes `Option` as variadic argument.
+- Bumped minimum supported go version to 1.14 (i.e. go1.14 or higher is required).
+### Added
+- `Retention` option is added to allow user to specify task retention duration after completion.
+- `TaskID` option is added to allow user to specify task ID.
+- `ErrTaskIDConflict` sentinel error value is added.
+- `ResultWriter` type is added and provided through the `Task.ResultWriter` method.
+- `TaskInfo` has new fields `CompletedAt`, `Result` and `Retention`.
+### Removed
+- `Client.SetDefaultOptions` is removed. Use `NewTask` instead to pass default options for tasks.
+
+## [0.18.6] - 2021-10-03
+### Changed
+- Updated `github.com/go-redis/redis` package to v8.
+
+## [0.18.5] - 2021-09-01
+### Added
+- `IsFailure` config option is added to determine whether an error returned from the Handler counts as a failure.
+
+## [0.18.4] - 2021-08-17
+### Fixed
+- Scheduler methods are now thread-safe. It's now safe to call `Register` and `Unregister` concurrently.
+
+## [0.18.3] - 2021-08-09
+### Changed
+- `Client.Enqueue` no longer enqueues tasks with an empty typename; an error message is returned.
+
+## [0.18.2] - 2021-07-15
+### Changed
+- Changed the `Queue` function to not convert the provided queue name to lowercase. Queue names are now case-sensitive.
+- `QueueInfo.MemoryUsage` is now an approximate usage value.
+### Fixed
+- Fixed latency issue around memory usage (see https://github.com/hibiken/asynq/issues/309).
+
 ## [0.18.1] - 2021-07-04
 ### Changed
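
To make the new grouping options concrete, here is a minimal client-side sketch; the task type `email:digest`, group name `digests`, and Redis address are illustrative assumptions, not part of this release:

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Tasks enqueued with the same Group option are held in the "aggregating"
	// state until the server's grace period, max delay, or max size kicks in.
	for _, user := range []string{"alice", "bob", "carol"} {
		task := asynq.NewTask("email:digest", []byte(user))
		if _, err := client.Enqueue(task, asynq.Group("digests")); err != nil {
			log.Fatalf("could not enqueue: %v", err)
		}
	}
}
```

The server-side half of the feature (grace period, max delay, max size, and the aggregation callback) is shown in the `aggregator.go` section below.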

CODE_OF_CONDUCT.md (new file)

@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
ken.hibino7@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

README.md

@@ -28,8 +28,8 @@ Task queues are used as a mechanism to distribute work across multiple machines.
 - Scheduling of tasks
 - [Retries](https://github.com/hibiken/asynq/wiki/Task-Retry) of failed tasks
 - Automatic recovery of tasks in the event of a worker crash
-- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#weighted-priority-queues)
-- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#strict-priority-queues)
+- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#weighted-priority)
+- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#strict-priority)
 - Low latency to add a task since writes are fast in Redis
 - De-duplication of tasks using [unique option](https://github.com/hibiken/asynq/wiki/Unique-Tasks)
 - Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)
@@ -38,6 +38,7 @@ Task queues are used as a mechanism to distribute work across multiple machines.
 - [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks)
 - [Support Redis Cluster](https://github.com/hibiken/asynq/wiki/Redis-Cluster) for automatic sharding and high availability
 - [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for high availability
+- Integration with [Prometheus](https://prometheus.io/) to collect and visualize queue metrics
 - [Web UI](#web-ui) to inspect and remote-control queues and tasks
 - [CLI](#command-line-tool) to inspect and remote-control queues and tasks
@@ -49,7 +50,7 @@ Task queues are used as a mechanism to distribute work across multiple machines.
 ## Quickstart

-Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.13` or higher is required.
+Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.14` or higher is required.

 Initialize your project by creating a folder and then running `go mod init github.com/your/repo` ([learn more](https://blog.golang.org/using-go-modules)) inside the folder. Then install Asynq library with the [`go get`](https://golang.org/cmd/go/#hdr-Add_dependencies_to_current_module_and_install_them) command:
@@ -65,8 +66,11 @@ Next, write a package that encapsulates task creation and task handling.
 package tasks

 import (
+	"context"
 	"encoding/json"
 	"fmt"
+	"log"
+	"time"

 	"github.com/hibiken/asynq"
 )
@@ -91,7 +95,7 @@ type ImageResizePayload struct {
 //----------------------------------------------

 func NewEmailDeliveryTask(userID int, tmplID string) (*asynq.Task, error) {
-	payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: templID})
+	payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: tmplID})
 	if err != nil {
 		return nil, err
 	}
@@ -103,7 +107,8 @@ func NewImageResizeTask(src string) (*asynq.Task, error) {
 	if err != nil {
 		return nil, err
 	}
-	return asynq.NewTask(TypeImageResize, payload), nil
+	// task options can be passed to NewTask, which can be overridden at enqueue time.
+	return asynq.NewTask(TypeImageResize, payload, asynq.MaxRetry(5), asynq.Timeout(20*time.Minute)), nil
 }

 //---------------------------------------------------------------
@@ -129,7 +134,7 @@ type ImageProcessor struct {
 	// ... fields for struct
 }

-func (p *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
+func (processor *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
 	var p ImageResizePayload
 	if err := json.Unmarshal(t.Payload(), &p); err != nil {
 		return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
@@ -140,7 +145,7 @@ func (p *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
 }

 func NewImageProcessor() *ImageProcessor {
-	// ... return an instance
+	return &ImageProcessor{}
 }
 ```
@@ -196,28 +201,15 @@ func main() {
 	// Options include MaxRetry, Queue, Timeout, Deadline, Unique etc.
 	// ----------------------------------------------------------------------------

-	client.SetDefaultOptions(tasks.TypeImageResize, asynq.MaxRetry(10), asynq.Timeout(3*time.Minute))
-
 	task, err = tasks.NewImageResizeTask("https://example.com/myassets/image.jpg")
 	if err != nil {
 		log.Fatalf("could not create task: %v", err)
 	}
-	info, err = client.Enqueue(task)
+	info, err = client.Enqueue(task, asynq.MaxRetry(10), asynq.Timeout(3*time.Minute))
 	if err != nil {
 		log.Fatalf("could not enqueue task: %v", err)
 	}
 	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
-
-	// ---------------------------------------------------------------------------
-	// Example 4: Pass options to tune task processing behavior at enqueue time.
-	// Options passed at enqueue time override default ones.
-	// ---------------------------------------------------------------------------
-
-	info, err = client.Enqueue(task, asynq.Queue("critical"), asynq.Timeout(30*time.Second))
-	if err != nil {
-		log.Fatal("could not enqueue task: %v", err)
-	}
-	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 }
 ```
@@ -239,7 +231,7 @@ const redisAddr = "127.0.0.1:6379"
 func main() {
 	srv := asynq.NewServer(
-		asynq.RedisClientOpt{Addr: redisAddr}
+		asynq.RedisClientOpt{Addr: redisAddr},
 		asynq.Config{
 			// Specify how many concurrent workers to use
 			Concurrency: 10,
@@ -283,6 +275,9 @@ Here's a few screenshots of the Web UI:
 ![Web UI TasksView](https://user-images.githubusercontent.com/11155743/114697070-1f0a0300-9d26-11eb-855c-d3ec263865b7.png)

+**Metrics view**
+
+<img width="1532" alt="Screen Shot 2021-12-19 at 4 37 19 PM" src="https://user-images.githubusercontent.com/10953044/146777420-cae6c476-bac6-469c-acce-b2f6584e8707.png">
+
 **Settings and adaptive dark mode**

 ![Web UI Settings and adaptive dark mode](https://user-images.githubusercontent.com/11155743/114697149-3517c380-9d26-11eb-9f7a-ae2dd00aad5b.png)

aggregator.go (new file)

@@ -0,0 +1,176 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"context"
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// An aggregator is responsible for checking groups and aggregating the tasks in a group
// into one task if any of the grouping conditions is met.
type aggregator struct {
logger *log.Logger
broker base.Broker
client *Client
// channel to communicate back to the long running "aggregator" goroutine.
done chan struct{}
// list of queue names to check and aggregate.
queues []string
// Group configurations
gracePeriod time.Duration
maxDelay time.Duration
maxSize int
// User provided group aggregator.
ga GroupAggregator
// interval used to check for aggregation
interval time.Duration
// sema is a counting semaphore to ensure the number of active aggregating functions
// does not exceed the limit.
sema chan struct{}
}
type aggregatorParams struct {
logger *log.Logger
broker base.Broker
queues []string
gracePeriod time.Duration
maxDelay time.Duration
maxSize int
groupAggregator GroupAggregator
}
const (
// Maximum number of aggregation checks in flight concurrently.
maxConcurrentAggregationChecks = 3
// Default interval used for aggregation checks. If the provided gracePeriod is less than
// the default, use the gracePeriod.
defaultAggregationCheckInterval = 7 * time.Second
)
func newAggregator(params aggregatorParams) *aggregator {
interval := defaultAggregationCheckInterval
if params.gracePeriod < interval {
interval = params.gracePeriod
}
return &aggregator{
logger: params.logger,
broker: params.broker,
client: &Client{broker: params.broker},
done: make(chan struct{}),
queues: params.queues,
gracePeriod: params.gracePeriod,
maxDelay: params.maxDelay,
maxSize: params.maxSize,
ga: params.groupAggregator,
sema: make(chan struct{}, maxConcurrentAggregationChecks),
interval: interval,
}
}
func (a *aggregator) shutdown() {
if a.ga == nil {
return
}
a.logger.Debug("Aggregator shutting down...")
// Signal the aggregator goroutine to stop.
a.done <- struct{}{}
}
func (a *aggregator) start(wg *sync.WaitGroup) {
if a.ga == nil {
return
}
wg.Add(1)
go func() {
defer wg.Done()
ticker := time.NewTicker(a.interval)
for {
select {
case <-a.done:
a.logger.Debug("Waiting for all aggregation checks to finish...")
// block until all aggregation checks released the token
for i := 0; i < cap(a.sema); i++ {
a.sema <- struct{}{}
}
a.logger.Debug("Aggregator done")
ticker.Stop()
return
case t := <-ticker.C:
a.exec(t)
}
}
}()
}
func (a *aggregator) exec(t time.Time) {
select {
case a.sema <- struct{}{}: // acquire token
go a.aggregate(t)
default:
// If the semaphore blocks, then we are currently running max number of
// aggregation checks. Skip this round and log warning.
a.logger.Warnf("Max number of aggregation checks in flight. Skipping")
}
}
func (a *aggregator) aggregate(t time.Time) {
defer func() { <-a.sema /* release token */ }()
for _, qname := range a.queues {
groups, err := a.broker.ListGroups(qname)
if err != nil {
a.logger.Errorf("Failed to list groups in queue: %q", qname)
continue
}
for _, gname := range groups {
aggregationSetID, err := a.broker.AggregationCheck(
qname, gname, t, a.gracePeriod, a.maxDelay, a.maxSize)
if err != nil {
a.logger.Errorf("Failed to run aggregation check: queue=%q group=%q", qname, gname)
continue
}
if aggregationSetID == "" {
a.logger.Debugf("No aggregation needed at this time: queue=%q group=%q", qname, gname)
continue
}
// Aggregate and enqueue.
msgs, deadline, err := a.broker.ReadAggregationSet(qname, gname, aggregationSetID)
if err != nil {
a.logger.Errorf("Failed to read aggregation set: queue=%q, group=%q, setID=%q",
qname, gname, aggregationSetID)
continue
}
tasks := make([]*Task, len(msgs))
for i, m := range msgs {
tasks[i] = NewTask(m.Type, m.Payload)
}
aggregatedTask := a.ga.Aggregate(gname, tasks)
ctx, cancel := context.WithDeadline(context.Background(), deadline)
if _, err := a.client.EnqueueContext(ctx, aggregatedTask, Queue(qname)); err != nil {
a.logger.Errorf("Failed to enqueue aggregated task (queue=%q, group=%q, setID=%q): %v",
qname, gname, aggregationSetID, err)
cancel()
continue
}
if err := a.broker.DeleteAggregationSet(ctx, qname, gname, aggregationSetID); err != nil {
a.logger.Warnf("Failed to delete aggregation set: queue=%q, group=%q, setID=%q",
qname, gname, aggregationSetID)
}
cancel()
}
}
}
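
For context, here is a minimal sketch of the server-side wiring that drives the aggregator above; the thresholds, task type names, and the concatenating aggregate function are assumptions chosen for illustration:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		asynq.Config{
			// Wait this long after the last task joined a group before aggregating.
			GroupGracePeriod: 2 * time.Second,
			// Aggregate at most this long after the first task joined the group.
			GroupMaxDelay: 10 * time.Second,
			// Aggregate as soon as a group holds this many tasks.
			GroupMaxSize: 20,
			// Combine all tasks in the group into a single task (illustrative).
			GroupAggregator: asynq.GroupAggregatorFunc(func(group string, tasks []*asynq.Task) *asynq.Task {
				var combined []byte
				for _, t := range tasks {
					combined = append(combined, t.Payload()...)
				}
				return asynq.NewTask("aggregated:"+group, combined)
			}),
		},
	)
	mux := asynq.NewServeMux()
	mux.HandleFunc("aggregated:digests", func(ctx context.Context, t *asynq.Task) error {
		log.Printf("processing %d bytes of aggregated payload", len(t.Payload()))
		return nil
	})
	if err := srv.Run(mux); err != nil {
		log.Fatal(err)
	}
}
```

Note that the aggregator goroutine only starts when a `GroupAggregator` is set; with a nil aggregator, `start` and `shutdown` above are no-ops.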

aggregator_test.go (new file)

@@ -0,0 +1,165 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
h "github.com/hibiken/asynq/internal/testutil"
)
func TestAggregator(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
client := Client{broker: rdbClient}
tests := []struct {
desc string
gracePeriod time.Duration
maxDelay time.Duration
maxSize int
aggregateFunc func(gname string, tasks []*Task) *Task
tasks []*Task // tasks to enqueue
enqueueFrequency time.Duration // time between one enqueue event to another
waitTime time.Duration // time to wait
wantGroups map[string]map[string][]base.Z
wantPending map[string][]*base.TaskMessage
}{
{
desc: "group older than the grace period should be aggregated",
gracePeriod: 1 * time.Second,
maxDelay: 0, // no maxdelay limit
maxSize: 0, // no maxsize limit
aggregateFunc: func(gname string, tasks []*Task) *Task {
return NewTask(gname, nil, MaxRetry(len(tasks))) // use max retry to see how many tasks were aggregated
},
tasks: []*Task{
NewTask("task1", nil, Group("mygroup")),
NewTask("task2", nil, Group("mygroup")),
NewTask("task3", nil, Group("mygroup")),
},
enqueueFrequency: 300 * time.Millisecond,
waitTime: 3 * time.Second,
wantGroups: map[string]map[string][]base.Z{
"default": {
"mygroup": {},
},
},
wantPending: map[string][]*base.TaskMessage{
"default": {
h.NewTaskMessageBuilder().SetType("mygroup").SetRetry(3).Build(),
},
},
},
{
desc: "group older than the max-delay should be aggregated",
gracePeriod: 2 * time.Second,
maxDelay: 4 * time.Second,
maxSize: 0, // no maxsize limit
aggregateFunc: func(gname string, tasks []*Task) *Task {
return NewTask(gname, nil, MaxRetry(len(tasks))) // use max retry to see how many tasks were aggregated
},
tasks: []*Task{
NewTask("task1", nil, Group("mygroup")), // time 0
NewTask("task2", nil, Group("mygroup")), // time 1s
NewTask("task3", nil, Group("mygroup")), // time 2s
NewTask("task4", nil, Group("mygroup")), // time 3s
},
enqueueFrequency: 1 * time.Second,
waitTime: 4 * time.Second,
wantGroups: map[string]map[string][]base.Z{
"default": {
"mygroup": {},
},
},
wantPending: map[string][]*base.TaskMessage{
"default": {
h.NewTaskMessageBuilder().SetType("mygroup").SetRetry(4).Build(),
},
},
},
{
desc: "group reached the max-size should be aggregated",
gracePeriod: 1 * time.Minute,
maxDelay: 0, // no maxdelay limit
maxSize: 5,
aggregateFunc: func(gname string, tasks []*Task) *Task {
return NewTask(gname, nil, MaxRetry(len(tasks))) // use max retry to see how many tasks were aggregated
},
tasks: []*Task{
NewTask("task1", nil, Group("mygroup")),
NewTask("task2", nil, Group("mygroup")),
NewTask("task3", nil, Group("mygroup")),
NewTask("task4", nil, Group("mygroup")),
NewTask("task5", nil, Group("mygroup")),
},
enqueueFrequency: 300 * time.Millisecond,
waitTime: defaultAggregationCheckInterval * 2,
wantGroups: map[string]map[string][]base.Z{
"default": {
"mygroup": {},
},
},
wantPending: map[string][]*base.TaskMessage{
"default": {
h.NewTaskMessageBuilder().SetType("mygroup").SetRetry(5).Build(),
},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
aggregator := newAggregator(aggregatorParams{
logger: testLogger,
broker: rdbClient,
queues: []string{"default"},
gracePeriod: tc.gracePeriod,
maxDelay: tc.maxDelay,
maxSize: tc.maxSize,
groupAggregator: GroupAggregatorFunc(tc.aggregateFunc),
})
var wg sync.WaitGroup
aggregator.start(&wg)
for _, task := range tc.tasks {
if _, err := client.Enqueue(task); err != nil {
t.Errorf("%s: Client Enqueue failed: %v", tc.desc, err)
aggregator.shutdown()
wg.Wait()
continue
}
time.Sleep(tc.enqueueFrequency)
}
time.Sleep(tc.waitTime)
for qname, groups := range tc.wantGroups {
for gname, want := range groups {
gotGroup := h.GetGroupEntries(t, r, qname, gname)
if diff := cmp.Diff(want, gotGroup, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s: mismatch found in %q; (-want,+got)\n%s", tc.desc, base.GroupKey(qname, gname), diff)
}
}
}
for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt, h.IgnoreIDOpt); diff != "" {
t.Errorf("%s: mismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
}
}
aggregator.shutdown()
wg.Wait()
}
}

asynq.go

@@ -5,14 +5,16 @@
 package asynq

 import (
+	"context"
 	"crypto/tls"
 	"fmt"
+	"net"
 	"net/url"
 	"strconv"
 	"strings"
 	"time"

-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
 	"github.com/hibiken/asynq/internal/base"
 )
@@ -23,16 +25,39 @@ type Task struct {
 	// payload holds data needed to perform the task.
 	payload []byte
+
+	// opts holds options for the task.
+	opts []Option
+
+	// w is the ResultWriter for the task.
+	w *ResultWriter
 }

 func (t *Task) Type() string { return t.typename }

 func (t *Task) Payload() []byte { return t.payload }

+// ResultWriter returns a pointer to the ResultWriter associated with the task.
+//
+// Nil pointer is returned if called on a newly created task (i.e. task created by calling NewTask).
+// Only the tasks passed to Handler.ProcessTask have a valid ResultWriter pointer.
+func (t *Task) ResultWriter() *ResultWriter { return t.w }
+
 // NewTask returns a new Task given a type name and payload data.
-func NewTask(typename string, payload []byte) *Task {
+// Options can be passed to configure task processing behavior.
+func NewTask(typename string, payload []byte, opts ...Option) *Task {
 	return &Task{
 		typename: typename,
 		payload:  payload,
+		opts:     opts,
+	}
+}
+
+// newTask creates a task with the given typename, payload and ResultWriter.
+func newTask(typename string, payload []byte, w *ResultWriter) *Task {
+	return &Task{
+		typename: typename,
+		payload:  payload,
+		w:        w,
 	}
 }
@@ -73,33 +98,64 @@ type TaskInfo struct {
 	// Deadline is the deadline for the task, zero value if not specified.
 	Deadline time.Time

+	// Group is the name of the group in which the task belongs.
+	//
+	// Tasks in the same queue can be grouped together by Group name and will be aggregated into one task
+	// by a Server processing the queue.
+	//
+	// Empty string (default) indicates task does not belong to any groups, and no aggregation will be applied to the task.
+	Group string
+
 	// NextProcessAt is the time the task is scheduled to be processed,
 	// zero if not applicable.
 	NextProcessAt time.Time

+	// IsOrphaned describes whether the task is left in active state with no worker processing it.
+	// An orphaned task indicates that the worker has crashed or experienced network failures and was not able to
+	// extend its lease on the task.
+	//
+	// This task will be recovered by running a server against the queue the task is in.
+	// This field is only applicable to tasks with TaskStateActive.
+	IsOrphaned bool
+
+	// Retention is the duration of the retention period after the task is successfully processed.
+	Retention time.Duration
+
+	// CompletedAt is the time when the task is processed successfully.
+	// Zero value (i.e. time.Time{}) indicates no value.
+	CompletedAt time.Time
+
+	// Result holds the result data associated with the task.
+	// Use ResultWriter to write result data from the Handler.
+	Result []byte
 }

+// If t is non-zero, returns time converted from t as unix time in seconds.
+// If t is zero, returns zero value of time.Time.
+func fromUnixTimeOrZero(t int64) time.Time {
+	if t == 0 {
+		return time.Time{}
+	}
+	return time.Unix(t, 0)
+}
+
-func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time) *TaskInfo {
+func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time, result []byte) *TaskInfo {
 	info := TaskInfo{
-		ID:            msg.ID.String(),
+		ID:            msg.ID,
 		Queue:         msg.Queue,
 		Type:          msg.Type,
 		Payload:       msg.Payload, // Do we need to make a copy?
 		MaxRetry:      msg.Retry,
 		Retried:       msg.Retried,
 		LastErr:       msg.ErrorMsg,
+		Group:         msg.GroupKey,
 		Timeout:       time.Duration(msg.Timeout) * time.Second,
+		Deadline:      fromUnixTimeOrZero(msg.Deadline),
+		Retention:     time.Duration(msg.Retention) * time.Second,
 		NextProcessAt: nextProcessAt,
+		LastFailedAt:  fromUnixTimeOrZero(msg.LastFailedAt),
+		CompletedAt:   fromUnixTimeOrZero(msg.CompletedAt),
+		Result:        result,
 	}
-	if msg.LastFailedAt == 0 {
-		info.LastFailedAt = time.Time{}
-	} else {
-		info.LastFailedAt = time.Unix(msg.LastFailedAt, 0)
-	}
-	if msg.Deadline == 0 {
-		info.Deadline = time.Time{}
-	} else {
-		info.Deadline = time.Unix(msg.Deadline, 0)
-	}

 	switch state {
@@ -113,6 +169,10 @@ func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time
 	case base.TaskStateRetry:
 		info.State = TaskStateRetry
 	case base.TaskStateArchived:
 		info.State = TaskStateArchived
+	case base.TaskStateCompleted:
+		info.State = TaskStateCompleted
+	case base.TaskStateAggregating:
+		info.State = TaskStateAggregating
 	default:
 		panic(fmt.Sprintf("internal error: unknown state: %d", state))
 	}
@@ -137,6 +197,12 @@ const (
 	// Indicates that the task is archived and stored for inspection purposes.
 	TaskStateArchived

+	// Indicates that the task is processed successfully and retained until the retention TTL expires.
+	TaskStateCompleted
+
+	// Indicates that the task is waiting in a group to be aggregated into one task.
+	TaskStateAggregating
 )

 func (s TaskState) String() string {
@@ -151,6 +217,10 @@ func (s TaskState) String() string {
 	case TaskStateRetry:
 		return "retry"
 	case TaskStateArchived:
 		return "archived"
+	case TaskStateCompleted:
+		return "completed"
+	case TaskStateAggregating:
+		return "aggregating"
 	}
 	panic("asynq: unknown task state")
 }
@@ -366,9 +436,10 @@ func (opt RedisClusterClientOpt) MakeRedisClient() interface{} {
 // ParseRedisURI parses redis uri string and returns RedisConnOpt if uri is valid.
 // It returns a non-nil error if uri cannot be parsed.
 //
-// Three URI schemes are supported, which are redis:, redis-socket:, and redis-sentinel:.
+// Four URI schemes are supported, which are redis:, rediss:, redis-socket:, and redis-sentinel:.
 // Supported formats are:
 //	redis://[:password@]host[:port][/dbnumber]
+//	rediss://[:password@]host[:port][/dbnumber]
 //	redis-socket://[:password@]path[?db=dbnumber]
 //	redis-sentinel://[:password@]host1[:port][,host2:[:port]][,hostN:[:port]][?master=masterName]
 func ParseRedisURI(uri string) (RedisConnOpt, error) {
@@ -377,7 +448,7 @@ func ParseRedisURI(uri string) (RedisConnOpt, error) {
 		return nil, fmt.Errorf("asynq: could not parse redis uri: %v", err)
 	}
 	switch u.Scheme {
-	case "redis":
+	case "redis", "rediss":
 		return parseRedisURI(u)
 	case "redis-socket":
 		return parseRedisSocketURI(u)
@@ -391,6 +462,8 @@ func ParseRedisURI(uri string) (RedisConnOpt, error) {
 func parseRedisURI(u *url.URL) (RedisConnOpt, error) {
 	var db int
 	var err error
+	var redisConnOpt RedisClientOpt
+
 	if len(u.Path) > 0 {
 		xs := strings.Split(strings.Trim(u.Path, "/"), "/")
 		db, err = strconv.Atoi(xs[0])
@@ -402,7 +475,20 @@ func parseRedisURI(u *url.URL) (RedisConnOpt, error) {
 	if v, ok := u.User.Password(); ok {
 		password = v
 	}
-	return RedisClientOpt{Addr: u.Host, DB: db, Password: password}, nil
+
+	if u.Scheme == "rediss" {
+		h, _, err := net.SplitHostPort(u.Host)
+		if err != nil {
+			h = u.Host
+		}
+		redisConnOpt.TLSConfig = &tls.Config{ServerName: h}
+	}
+
+	redisConnOpt.Addr = u.Host
+	redisConnOpt.Password = password
+	redisConnOpt.DB = db
+
+	return redisConnOpt, nil
 }

 func parseRedisSocketURI(u *url.URL) (RedisConnOpt, error) {
@@ -435,3 +521,27 @@ func parseRedisSentinelURI(u *url.URL) (RedisConnOpt, error) {
 	}
 	return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, Password: password}, nil
 }
+
+// ResultWriter is a client interface to write result data for a task.
+// It writes the data to the redis instance the server is connected to.
+type ResultWriter struct {
+	id     string // task ID this writer is responsible for
+	qname  string // queue name the task belongs to
+	broker base.Broker
+	ctx    context.Context // context associated with the task
+}
+
+// Write writes the given data as a result of the task the ResultWriter is associated with.
+func (w *ResultWriter) Write(data []byte) (n int, err error) {
+	select {
+	case <-w.ctx.Done():
+		return 0, fmt.Errorf("failed to write task result: %v", w.ctx.Err())
+	default:
+	}
+	return w.broker.WriteResult(w.qname, w.id, data)
+}
+
+// TaskID returns the ID of the task the ResultWriter is associated with.
+func (w *ResultWriter) TaskID() string {
+	return w.id
+}
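
A short usage sketch of the new `rediss` scheme; the host, password, and db number here are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	// A rediss:// URI yields a RedisClientOpt whose TLSConfig.ServerName is
	// set to the host portion of the address (see parseRedisURI above).
	opt, err := asynq.ParseRedisURI("rediss://:mypassword@example.com:6379/2")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", opt)
}
```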

asynq_test.go

@@ -5,15 +5,17 @@
 package asynq

 import (
+	"crypto/tls"
 	"flag"
 	"sort"
 	"strings"
 	"testing"

-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
 	"github.com/google/go-cmp/cmp"
-	h "github.com/hibiken/asynq/internal/asynqtest"
+	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/hibiken/asynq/internal/log"
+	h "github.com/hibiken/asynq/internal/testutil"
 )

 //============================================================================
@@ -99,6 +101,10 @@ func TestParseRedisURI(t *testing.T) {
 		{
 			"redis://localhost:6379",
 			RedisClientOpt{Addr: "localhost:6379"},
 		},
+		{
+			"rediss://localhost:6379",
+			RedisClientOpt{Addr: "localhost:6379", TLSConfig: &tls.Config{ServerName: "localhost"}},
+		},
 		{
 			"redis://localhost:6379/3",
 			RedisClientOpt{Addr: "localhost:6379", DB: 3},
@@ -151,7 +157,7 @@ func TestParseRedisURI(t *testing.T) {
 			continue
 		}
-		if diff := cmp.Diff(tc.want, got); diff != "" {
+		if diff := cmp.Diff(tc.want, got, cmpopts.IgnoreUnexported(tls.Config{})); diff != "" {
 			t.Errorf("ParseRedisURI(%q) = %+v, want %+v\n(-want,+got)\n%s", tc.uri, got, tc.want, diff)
 		}
 	}

benchmark_test.go

@@ -12,7 +12,7 @@ import (
 	"testing"
 	"time"

-	h "github.com/hibiken/asynq/internal/asynqtest"
+	h "github.com/hibiken/asynq/internal/testutil"
 )

 // Creates a new task of type "task<n>" with payload {"data": n}.

195
client.go
View File

@@ -5,12 +5,12 @@
package asynq

import (
+	"context"
	"fmt"
	"strings"
-	"sync"
	"time"

-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
	"github.com/google/uuid"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/errors"
@@ -24,9 +24,7 @@ import (
//
// Clients are safe for concurrent use by multiple goroutines.
type Client struct {
-	mu   sync.Mutex
-	opts map[string][]Option
-	rdb  *rdb.RDB
+	broker base.Broker
}

// NewClient returns a new Client instance given a redis connection option.
@@ -35,11 +33,7 @@ func NewClient(r RedisConnOpt) *Client {
	if !ok {
		panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
	}
-	rdb := rdb.NewRDB(c)
-	return &Client{
-		opts: make(map[string][]Option),
-		rdb:  rdb,
-	}
+	return &Client{broker: rdb.NewRDB(c)}
}

type OptionType int
@@ -52,6 +46,9 @@ const (
	UniqueOpt
	ProcessAtOpt
	ProcessInOpt
+	TaskIDOpt
+	RetentionOpt
+	GroupOpt
)

// Option specifies the task processing behavior.
@@ -70,11 +67,14 @@ type Option interface {
type (
	retryOption     int
	queueOption     string
+	taskIDOption    string
	timeoutOption   time.Duration
	deadlineOption  time.Time
	uniqueOption    time.Duration
	processAtOption time.Time
	processInOption time.Duration
+	retentionOption time.Duration
+	groupOption     string
)

// MaxRetry returns an option to specify the max number of times
@@ -93,15 +93,22 @@ func (n retryOption) Type() OptionType { return MaxRetryOpt }
func (n retryOption) Value() interface{} { return int(n) }

// Queue returns an option to specify the queue to enqueue the task into.
-//
-// Queue name is case-insensitive and the lowercased version is used.
-func Queue(qname string) Option {
-	return queueOption(strings.ToLower(qname))
+func Queue(name string) Option {
+	return queueOption(name)
}

-func (qname queueOption) String() string     { return fmt.Sprintf("Queue(%q)", string(qname)) }
-func (qname queueOption) Type() OptionType   { return QueueOpt }
-func (qname queueOption) Value() interface{} { return string(qname) }
+func (name queueOption) String() string     { return fmt.Sprintf("Queue(%q)", string(name)) }
+func (name queueOption) Type() OptionType   { return QueueOpt }
+func (name queueOption) Value() interface{} { return string(name) }
+
+// TaskID returns an option to specify the task ID.
+func TaskID(id string) Option {
+	return taskIDOption(id)
+}
+
+func (id taskIDOption) String() string     { return fmt.Sprintf("TaskID(%q)", string(id)) }
+func (id taskIDOption) Type() OptionType   { return TaskIDOpt }
+func (id taskIDOption) Value() interface{} { return string(id) }

// Timeout returns an option to specify how long a task may run.
// If the timeout elapses before the Handler returns, then the task
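The TaskID option added above makes an enqueue idempotent: a second task with the same ID is rejected with ErrTaskIDConflict (defined later in this diff). A minimal sketch of caller-side usage; the Redis address and the task/ID names are illustrative, not part of the change:

package main

import (
	"errors"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	// Assumes a Redis server on localhost:6379.
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Enqueue with a caller-chosen ID; retrying this call cannot create a duplicate.
	info, err := client.Enqueue(asynq.NewTask("report:generate", nil), asynq.TaskID("report-2022-04-11"))
	switch {
	case errors.Is(err, asynq.ErrTaskIDConflict):
		log.Print("task already enqueued; nothing to do")
	case err != nil:
		log.Fatal(err)
	default:
		log.Printf("enqueued task id=%s", info.ID)
	}
}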
@@ -137,8 +144,10 @@ func (t deadlineOption) Value() interface{} { return time.Time(t) }

// Unique returns an option to enqueue a task only if the given task is unique.
// Task enqueued with this option is guaranteed to be unique within the given ttl.
-// Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued.
+// Once the task gets processed successfully or once the TTL has expired,
+// another task with the same uniqueness may be enqueued.
// ErrDuplicateTask error is returned when enqueueing a duplicate task.
+// TTL duration must be greater than or equal to 1 second.
//
// Uniqueness of a task is based on the following properties:
// - Task Type
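With the 1-second minimum TTL documented above, a duplicate enqueue inside the TTL window surfaces as ErrDuplicateTask. A small sketch, assuming a local Redis; the task name and payload are made up:

package main

import (
	"errors"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("email:welcome", []byte(`{"user_id": 42}`))
	if _, err := client.Enqueue(task, asynq.Unique(time.Hour)); err != nil {
		log.Fatal(err)
	}
	// Same (queue, type, payload) within the TTL: rejected as a duplicate.
	if _, err := client.Enqueue(task, asynq.Unique(time.Hour)); errors.Is(err, asynq.ErrDuplicateTask) {
		log.Print("duplicate skipped")
	}
}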
@@ -176,18 +185,47 @@ func (d processInOption) String() string { return fmt.Sprintf("ProcessIn(%v)", time.Duration(d)) }
func (d processInOption) Type() OptionType   { return ProcessInOpt }
func (d processInOption) Value() interface{} { return time.Duration(d) }

+// Retention returns an option to specify the duration of retention period for the task.
+// If this option is provided, the task will be stored as a completed task after successful processing.
+// A completed task will be deleted after the specified duration elapses.
+func Retention(d time.Duration) Option {
+	return retentionOption(d)
+}
+
+func (ttl retentionOption) String() string     { return fmt.Sprintf("Retention(%v)", time.Duration(ttl)) }
+func (ttl retentionOption) Type() OptionType   { return RetentionOpt }
+func (ttl retentionOption) Value() interface{} { return time.Duration(ttl) }
+
+// Group returns an option to specify the group used for the task.
+// Tasks in a given queue with the same group will be aggregated into one task before passed to Handler.
+func Group(name string) Option {
+	return groupOption(name)
+}
+
+func (name groupOption) String() string     { return fmt.Sprintf("Group(%q)", string(name)) }
+func (name groupOption) Type() OptionType   { return GroupOpt }
+func (name groupOption) Value() interface{} { return string(name) }
+
// ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
//
// ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
var ErrDuplicateTask = errors.New("task already exists")

+// ErrTaskIDConflict indicates that the given task could not be enqueued since its task ID already exists.
+//
+// ErrTaskIDConflict error only applies to tasks enqueued with a TaskID option.
+var ErrTaskIDConflict = errors.New("task ID conflicts with another task")
+
type option struct {
	retry     int
	queue     string
+	taskID    string
	timeout   time.Duration
	deadline  time.Time
	uniqueTTL time.Duration
	processAt time.Time
+	retention time.Duration
+	group     string
}

// composeOptions merges user provided options into the default options
@@ -198,6 +236,7 @@ func composeOptions(opts ...Option) (option, error) {
	res := option{
		retry:     defaultMaxRetry,
		queue:     base.DefaultQueueName,
+		taskID:    uuid.NewString(),
		timeout:   0, // do not set to deafultTimeout here
		deadline:  time.Time{},
		processAt: time.Now(),
@@ -207,21 +246,39 @@ func composeOptions(opts ...Option) (option, error) {
		case retryOption:
			res.retry = int(opt)
		case queueOption:
-			trimmed := strings.TrimSpace(string(opt))
-			if err := base.ValidateQueueName(trimmed); err != nil {
+			qname := string(opt)
+			if err := base.ValidateQueueName(qname); err != nil {
				return option{}, err
			}
-			res.queue = trimmed
+			res.queue = qname
+		case taskIDOption:
+			id := string(opt)
+			if isBlank(id) {
+				return option{}, errors.New("task ID cannot be empty")
+			}
+			res.taskID = id
		case timeoutOption:
			res.timeout = time.Duration(opt)
		case deadlineOption:
			res.deadline = time.Time(opt)
		case uniqueOption:
-			res.uniqueTTL = time.Duration(opt)
+			ttl := time.Duration(opt)
+			if ttl < 1*time.Second {
+				return option{}, errors.New("Unique TTL cannot be less than 1s")
+			}
+			res.uniqueTTL = ttl
		case processAtOption:
			res.processAt = time.Time(opt)
		case processInOption:
			res.processAt = time.Now().Add(time.Duration(opt))
+		case retentionOption:
+			res.retention = time.Duration(opt)
+		case groupOption:
+			key := string(opt)
+			if isBlank(key) {
+				return option{}, errors.New("group key cannot be empty")
+			}
+			res.group = key
		default:
			// ignore unexpected option
		}
@@ -229,6 +286,11 @@ func composeOptions(opts ...Option) (option, error) {
	return res, nil
}

+// isBlank returns true if the given s is empty or consist of all whitespaces.
+func isBlank(s string) bool {
+	return strings.TrimSpace(s) == ""
+}
+
const (
	// Default max retry count used if nothing is specified.
	defaultMaxRetry = 25
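These composeOptions checks run before any Redis command is issued, so invalid options fail fast at the call site. A hedged sketch of what a caller sees, with values chosen to trip each rule:

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Each of these violates a rule enforced in composeOptions above and
	// returns an error before anything is written to Redis.
	for _, opt := range []asynq.Option{
		asynq.TaskID("  "),                   // blank task ID
		asynq.Group(""),                      // empty group key
		asynq.Unique(300 * time.Millisecond), // TTL below the 1s minimum
	} {
		if _, err := client.Enqueue(asynq.NewTask("demo", nil), opt); err != nil {
			log.Printf("rejected: %v", err)
		}
	}
}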
@@ -243,37 +305,48 @@ var (
	noDeadline time.Time = time.Unix(0, 0)
)

-// SetDefaultOptions sets options to be used for a given task type.
-// The argument opts specifies the behavior of task processing.
-// If there are conflicting Option values the last one overrides others.
-//
-// Default options can be overridden by options passed at enqueue time.
-func (c *Client) SetDefaultOptions(taskType string, opts ...Option) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	c.opts[taskType] = opts
-}
-
// Close closes the connection with redis.
func (c *Client) Close() error {
-	return c.rdb.Close()
+	return c.broker.Close()
}

-// Enqueue enqueues the given task to be processed asynchronously.
+// Enqueue enqueues the given task to a queue.
//
// Enqueue returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
+// Any options provided to NewTask can be overridden by options passed to Enqueue.
// By deafult, max retry is set to 25 and timeout is set to 30 minutes.
//
// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
+//
+// Enqueue uses context.Background internally; to specify the context, use EnqueueContext.
func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
-	c.mu.Lock()
-	if defaults, ok := c.opts[task.Type()]; ok {
-		opts = append(defaults, opts...)
-	}
-	c.mu.Unlock()
+	return c.EnqueueContext(context.Background(), task, opts...)
+}
+
+// EnqueueContext enqueues the given task to a queue.
+//
+// EnqueueContext returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
+//
+// The argument opts specifies the behavior of task processing.
+// If there are conflicting Option values the last one overrides others.
+// Any options provided to NewTask can be overridden by options passed to Enqueue.
+// By deafult, max retry is set to 25 and timeout is set to 30 minutes.
+//
+// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
+//
+// The first argument context applies to the enqueue operation. To specify task timeout and deadline, use Timeout and Deadline option instead.
+func (c *Client) EnqueueContext(ctx context.Context, task *Task, opts ...Option) (*TaskInfo, error) {
+	if task == nil {
+		return nil, fmt.Errorf("task cannot be nil")
+	}
+	if strings.TrimSpace(task.Type()) == "" {
+		return nil, fmt.Errorf("task typename cannot be empty")
+	}
+	// merge task options with the options provided at enqueue time.
+	opts = append(task.opts, opts...)
	opt, err := composeOptions(opts...)
	if err != nil {
		return nil, err
@@ -295,7 +368,7 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
		uniqueKey = base.UniqueKey(opt.queue, task.Type(), task.Payload())
	}
	msg := &base.TaskMessage{
-		ID:        uuid.New(),
+		ID:        opt.taskID,
		Type:      task.Type(),
		Payload:   task.Payload(),
		Queue:     opt.queue,
@@ -303,37 +376,53 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
		Deadline:  deadline.Unix(),
		Timeout:   int64(timeout.Seconds()),
		UniqueKey: uniqueKey,
+		GroupKey:  opt.group,
+		Retention: int64(opt.retention.Seconds()),
	}
	now := time.Now()
	var state base.TaskState
-	if opt.processAt.Before(now) || opt.processAt.Equal(now) {
-		opt.processAt = now
-		err = c.enqueue(msg, opt.uniqueTTL)
-		state = base.TaskStatePending
-	} else {
-		err = c.schedule(msg, opt.processAt, opt.uniqueTTL)
+	if opt.processAt.After(now) {
+		err = c.schedule(ctx, msg, opt.processAt, opt.uniqueTTL)
		state = base.TaskStateScheduled
+	} else if opt.group != "" {
+		// Use zero value for processAt since we don't know when the task will be aggregated and processed.
+		opt.processAt = time.Time{}
+		err = c.addToGroup(ctx, msg, opt.group, opt.uniqueTTL)
+		state = base.TaskStateAggregating
+	} else {
+		opt.processAt = now
+		err = c.enqueue(ctx, msg, opt.uniqueTTL)
+		state = base.TaskStatePending
	}
	switch {
	case errors.Is(err, errors.ErrDuplicateTask):
		return nil, fmt.Errorf("%w", ErrDuplicateTask)
+	case errors.Is(err, errors.ErrTaskIdConflict):
+		return nil, fmt.Errorf("%w", ErrTaskIDConflict)
	case err != nil:
		return nil, err
	}
-	return newTaskInfo(msg, state, opt.processAt), nil
+	return newTaskInfo(msg, state, opt.processAt, nil), nil
}

-func (c *Client) enqueue(msg *base.TaskMessage, uniqueTTL time.Duration) error {
+func (c *Client) enqueue(ctx context.Context, msg *base.TaskMessage, uniqueTTL time.Duration) error {
	if uniqueTTL > 0 {
-		return c.rdb.EnqueueUnique(msg, uniqueTTL)
+		return c.broker.EnqueueUnique(ctx, msg, uniqueTTL)
	}
-	return c.rdb.Enqueue(msg)
+	return c.broker.Enqueue(ctx, msg)
}

-func (c *Client) schedule(msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
+func (c *Client) schedule(ctx context.Context, msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
	if uniqueTTL > 0 {
		ttl := t.Add(uniqueTTL).Sub(time.Now())
-		return c.rdb.ScheduleUnique(msg, t, ttl)
+		return c.broker.ScheduleUnique(ctx, msg, t, ttl)
	}
-	return c.rdb.Schedule(msg, t)
+	return c.broker.Schedule(ctx, msg, t)
}
+
+func (c *Client) addToGroup(ctx context.Context, msg *base.TaskMessage, group string, uniqueTTL time.Duration) error {
+	if uniqueTTL > 0 {
+		return c.broker.AddToGroupUnique(ctx, msg, group, uniqueTTL)
+	}
+	return c.broker.AddToGroup(ctx, msg, group)
+}
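The context passed to the new EnqueueContext bounds only the Redis round-trip for the enqueue itself; handler run time is still governed by the Timeout and Deadline options. A minimal sketch, assuming a local Redis and an illustrative task name:

package main

import (
	"context"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Bound the enqueue round-trip to 2 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	_, err := client.EnqueueContext(ctx, asynq.NewTask("email:send", nil),
		asynq.Timeout(5*time.Minute), // how long the handler may run, unrelated to ctx
	)
	if err != nil {
		log.Fatal(err)
	}
}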

client_test.go

@@ -5,14 +5,15 @@
package asynq

import (
+	"context"
	"errors"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
-	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
+	h "github.com/hibiken/asynq/internal/testutil"
)
func TestClientEnqueueWithProcessAtOption(t *testing.T) { func TestClientEnqueueWithProcessAtOption(t *testing.T) {
@@ -287,13 +288,13 @@ func TestClientEnqueue(t *testing.T) {
			},
		},
		{
-			desc: "Queue option should be case-insensitive",
+			desc: "Queue option should be case sensitive",
			task: task,
			opts: []Option{
-				Queue("HIGH"),
+				Queue("MyQueue"),
			},
			wantInfo: &TaskInfo{
-				Queue:         "high",
+				Queue:         "MyQueue",
				Type:          task.Type(),
				Payload:       task.Payload(),
				State:         TaskStatePending,
@@ -306,12 +307,12 @@ func TestClientEnqueue(t *testing.T) {
				NextProcessAt: now,
			},
			wantPending: map[string][]*base.TaskMessage{
-				"high": {
+				"MyQueue": {
					{
						Type:     task.Type(),
						Payload:  task.Payload(),
						Retry:    defaultMaxRetry,
-						Queue:    "high",
+						Queue:    "MyQueue",
						Timeout:  int64(defaultTimeout.Seconds()),
						Deadline: noDeadline.Unix(),
					},
@@ -415,6 +416,40 @@ func TestClientEnqueue(t *testing.T) {
				},
			},
		},
{
desc: "With Retention option",
task: task,
opts: []Option{
Retention(24 * time.Hour),
},
wantInfo: &TaskInfo{
Queue: "default",
Type: task.Type(),
Payload: task.Payload(),
State: TaskStatePending,
MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
Retention: 24 * time.Hour,
},
wantPending: map[string][]*base.TaskMessage{
"default": {
{
Type: task.Type(),
Payload: task.Payload(),
Retry: defaultMaxRetry,
Queue: "default",
Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(),
Retention: int64((24 * time.Hour).Seconds()),
},
},
},
},
	}

	for _, tc := range tests {
@@ -443,6 +478,252 @@ func TestClientEnqueue(t *testing.T) {
	}
}
func TestClientEnqueueWithGroupOption(t *testing.T) {
r := setup(t)
client := NewClient(getRedisConnOpt(t))
defer client.Close()
task := NewTask("mytask", []byte("foo"))
now := time.Now()
tests := []struct {
desc string
task *Task
opts []Option
wantInfo *TaskInfo
wantPending map[string][]*base.TaskMessage
wantGroups map[string]map[string][]base.Z // map queue name to a set of groups
wantScheduled map[string][]base.Z
}{
{
desc: "With only Group option",
task: task,
opts: []Option{
Group("mygroup"),
},
wantInfo: &TaskInfo{
Queue: "default",
Group: "mygroup",
Type: task.Type(),
Payload: task.Payload(),
State: TaskStateAggregating,
MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: time.Time{},
},
wantPending: map[string][]*base.TaskMessage{
"default": {}, // should not be pending
},
wantGroups: map[string]map[string][]base.Z{
"default": {
"mygroup": {
{
Message: &base.TaskMessage{
Type: task.Type(),
Payload: task.Payload(),
Retry: defaultMaxRetry,
Queue: "default",
Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(),
GroupKey: "mygroup",
},
Score: now.Unix(),
},
},
},
},
wantScheduled: map[string][]base.Z{
"default": {},
},
},
{
desc: "With Group and ProcessIn options",
task: task,
opts: []Option{
Group("mygroup"),
ProcessIn(30 * time.Minute),
},
wantInfo: &TaskInfo{
Queue: "default",
Group: "mygroup",
Type: task.Type(),
Payload: task.Payload(),
State: TaskStateScheduled,
MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now.Add(30 * time.Minute),
},
wantPending: map[string][]*base.TaskMessage{
"default": {}, // should not be pending
},
wantGroups: map[string]map[string][]base.Z{
"default": {
"mygroup": {}, // should not be added to the group yet
},
},
wantScheduled: map[string][]base.Z{
"default": {
{
Message: &base.TaskMessage{
Type: task.Type(),
Payload: task.Payload(),
Retry: defaultMaxRetry,
Queue: "default",
Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(),
GroupKey: "mygroup",
},
Score: now.Add(30 * time.Minute).Unix(),
},
},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
gotInfo, err := client.Enqueue(tc.task, tc.opts...)
if err != nil {
t.Error(err)
continue
}
cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(TaskInfo{}, "ID"),
cmpopts.EquateApproxTime(500 * time.Millisecond),
}
if diff := cmp.Diff(tc.wantInfo, gotInfo, cmpOptions...); diff != "" {
t.Errorf("%s;\nEnqueue(task) returned %v, want %v; (-want,+got)\n%s",
tc.desc, gotInfo, tc.wantInfo, diff)
}
for qname, want := range tc.wantPending {
got := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, got, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
}
}
for qname, groups := range tc.wantGroups {
for groupKey, want := range groups {
got := h.GetGroupEntries(t, r, qname, groupKey)
if diff := cmp.Diff(want, got, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.GroupKey(qname, groupKey), diff)
}
}
}
for qname, want := range tc.wantScheduled {
gotScheduled := h.GetScheduledEntries(t, r, qname)
if diff := cmp.Diff(want, gotScheduled, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.ScheduledKey(qname), diff)
}
}
}
}
func TestClientEnqueueWithTaskIDOption(t *testing.T) {
r := setup(t)
client := NewClient(getRedisConnOpt(t))
defer client.Close()
task := NewTask("send_email", nil)
now := time.Now()
tests := []struct {
desc string
task *Task
opts []Option
wantInfo *TaskInfo
wantPending map[string][]*base.TaskMessage
}{
{
desc: "With a valid TaskID option",
task: task,
opts: []Option{
TaskID("custom_id"),
},
wantInfo: &TaskInfo{
ID: "custom_id",
Queue: "default",
Type: task.Type(),
Payload: task.Payload(),
State: TaskStatePending,
MaxRetry: defaultMaxRetry,
Retried: 0,
LastErr: "",
LastFailedAt: time.Time{},
Timeout: defaultTimeout,
Deadline: time.Time{},
NextProcessAt: now,
},
wantPending: map[string][]*base.TaskMessage{
"default": {
{
ID: "custom_id",
Type: task.Type(),
Payload: task.Payload(),
Retry: defaultMaxRetry,
Queue: "default",
Timeout: int64(defaultTimeout.Seconds()),
Deadline: noDeadline.Unix(),
},
},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
gotInfo, err := client.Enqueue(tc.task, tc.opts...)
if err != nil {
t.Errorf("got non-nil error %v, want nil", err)
continue
}
cmpOptions := []cmp.Option{
cmpopts.EquateApproxTime(500 * time.Millisecond),
}
if diff := cmp.Diff(tc.wantInfo, gotInfo, cmpOptions...); diff != "" {
t.Errorf("%s;\nEnqueue(task) returned %v, want %v; (-want,+got)\n%s",
tc.desc, gotInfo, tc.wantInfo, diff)
}
for qname, want := range tc.wantPending {
got := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
}
}
}
}
func TestClientEnqueueWithConflictingTaskID(t *testing.T) {
setup(t)
client := NewClient(getRedisConnOpt(t))
defer client.Close()
const taskID = "custom_id"
task := NewTask("foo", nil)
if _, err := client.Enqueue(task, TaskID(taskID)); err != nil {
t.Fatalf("First task: Enqueue failed: %v", err)
}
_, err := client.Enqueue(task, TaskID(taskID))
if !errors.Is(err, ErrTaskIDConflict) {
t.Errorf("Second task: Enqueue returned %v, want %v", err, ErrTaskIDConflict)
}
}
func TestClientEnqueueWithProcessInOption(t *testing.T) {
	r := setup(t)
	client := NewClient(getRedisConnOpt(t))
@@ -578,6 +859,11 @@ func TestClientEnqueueError(t *testing.T) {
		task *Task
		opts []Option
	}{
{
desc: "With nil task",
task: nil,
opts: []Option{},
},
		{
			desc: "With empty queue name",
			task: task,
@@ -585,6 +871,31 @@ func TestClientEnqueueError(t *testing.T) {
			opts: []Option{
				Queue(""),
			},
		},
{
desc: "With empty task typename",
task: NewTask("", h.JSON(map[string]interface{}{})),
opts: []Option{},
},
{
desc: "With blank task typename",
task: NewTask(" ", h.JSON(map[string]interface{}{})),
opts: []Option{},
},
{
desc: "With empty task ID",
task: NewTask("foo", nil),
opts: []Option{TaskID("")},
},
{
desc: "With blank task ID",
task: NewTask("foo", nil),
opts: []Option{TaskID(" ")},
},
{
desc: "With unique option less than 1s",
task: NewTask("foo", nil),
opts: []Option{Unique(300 * time.Millisecond)},
},
	}

	for _, tc := range tests {
@@ -597,16 +908,17 @@ func TestClientEnqueueError(t *testing.T) {
	}
}
-func TestClientDefaultOptions(t *testing.T) {
+func TestClientWithDefaultOptions(t *testing.T) {
	r := setup(t)
	now := time.Now()

	tests := []struct {
		desc        string
-		defaultOpts []Option // options set at the client level.
+		defaultOpts []Option // options set at task initialization time
		opts        []Option // options used at enqueue time.
-		task        *Task
+		tasktype    string
+		payload     []byte
		wantInfo    *TaskInfo
		queue       string // queue that the message should go into.
		want        *base.TaskMessage
@@ -615,7 +927,8 @@ func TestClientDefaultOptions(t *testing.T) {
			desc:        "With queue routing option",
			defaultOpts: []Option{Queue("feed")},
			opts:        []Option{},
-			task:        NewTask("feed:import", nil),
+			tasktype:    "feed:import",
+			payload:     nil,
			wantInfo: &TaskInfo{
				Queue: "feed",
				Type:  "feed:import",
@@ -643,7 +956,8 @@ func TestClientDefaultOptions(t *testing.T) {
			desc:        "With multiple options",
			defaultOpts: []Option{Queue("feed"), MaxRetry(5)},
			opts:        []Option{},
-			task:        NewTask("feed:import", nil),
+			tasktype:    "feed:import",
+			payload:     nil,
			wantInfo: &TaskInfo{
				Queue: "feed",
				Type:  "feed:import",
@@ -671,7 +985,8 @@ func TestClientDefaultOptions(t *testing.T) {
			desc:        "With overriding options at enqueue time",
			defaultOpts: []Option{Queue("feed"), MaxRetry(5)},
			opts:        []Option{Queue("critical")},
-			task:        NewTask("feed:import", nil),
+			tasktype:    "feed:import",
+			payload:     nil,
			wantInfo: &TaskInfo{
				Queue: "critical",
				Type:  "feed:import",
@@ -700,8 +1015,8 @@ func TestClientDefaultOptions(t *testing.T) {
		h.FlushDB(t, r)
		c := NewClient(getRedisConnOpt(t))
		defer c.Close()
-		c.SetDefaultOptions(tc.task.Type(), tc.defaultOpts...)
-		gotInfo, err := c.Enqueue(tc.task, tc.opts...)
+		task := NewTask(tc.tasktype, tc.payload, tc.defaultOpts...)
+		gotInfo, err := c.Enqueue(task, tc.opts...)
		if err != nil {
			t.Fatal(err)
		}
@@ -751,7 +1066,7 @@ func TestClientEnqueueUnique(t *testing.T) {
			t.Fatal(err)
		}

-		gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
+		gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
		if !cmp.Equal(tc.ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
			t.Errorf("TTL = %v, want %v", gotTTL, tc.ttl)
			continue
@@ -796,7 +1111,7 @@ func TestClientEnqueueUniqueWithProcessInOption(t *testing.T) {
			t.Fatal(err)
		}

-		gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
+		gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
		wantTTL := time.Duration(tc.ttl.Seconds()+tc.d.Seconds()) * time.Second
		if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
			t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)
@@ -842,7 +1157,7 @@ func TestClientEnqueueUniqueWithProcessAtOption(t *testing.T) {
			t.Fatal(err)
		}

-		gotTTL := r.TTL(base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
+		gotTTL := r.TTL(context.Background(), base.UniqueKey(base.DefaultQueueName, tc.task.Type(), tc.task.Payload())).Val()
		wantTTL := tc.at.Add(tc.ttl).Sub(time.Now())
		if !cmp.Equal(wantTTL.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
			t.Errorf("TTL = %v, want %v", gotTTL, wantTTL)

context.go

@@ -6,49 +6,16 @@ package asynq
import (
	"context"
-	"time"

-	"github.com/hibiken/asynq/internal/base"
+	asynqcontext "github.com/hibiken/asynq/internal/context"
)

-// A taskMetadata holds task scoped data to put in context.
-type taskMetadata struct {
-	id         string
-	maxRetry   int
-	retryCount int
-	qname      string
-}
-
-// ctxKey type is unexported to prevent collisions with context keys defined in
-// other packages.
-type ctxKey int
-
-// metadataCtxKey is the context key for the task metadata.
-// Its value of zero is arbitrary.
-const metadataCtxKey ctxKey = 0
-
-// createContext returns a context and cancel function for a given task message.
-func createContext(msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
-	metadata := taskMetadata{
-		id:         msg.ID.String(),
-		maxRetry:   msg.Retry,
-		retryCount: msg.Retried,
-		qname:      msg.Queue,
-	}
-	ctx := context.WithValue(context.Background(), metadataCtxKey, metadata)
-	return context.WithDeadline(ctx, deadline)
-}
-
// GetTaskID extracts a task ID from a context, if any.
//
// ID of a task is guaranteed to be unique.
// ID of a task doesn't change if the task is being retried.
func GetTaskID(ctx context.Context) (id string, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return "", false
-	}
-	return metadata.id, true
+	return asynqcontext.GetTaskID(ctx)
}

// GetRetryCount extracts retry count from a context, if any.
@@ -56,11 +23,7 @@ func GetTaskID(ctx context.Context) (id string, ok bool) {
// Return value n indicates the number of times associated task has been
// retried so far.
func GetRetryCount(ctx context.Context) (n int, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return 0, false
-	}
-	return metadata.retryCount, true
+	return asynqcontext.GetRetryCount(ctx)
}

// GetMaxRetry extracts maximum retry from a context, if any.
@@ -68,20 +31,12 @@ func GetRetryCount(ctx context.Context) (n int, ok bool) {
// Return value n indicates the maximum number of times the assoicated task
// can be retried if ProcessTask returns a non-nil error.
func GetMaxRetry(ctx context.Context) (n int, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return 0, false
-	}
-	return metadata.maxRetry, true
+	return asynqcontext.GetMaxRetry(ctx)
}

// GetQueueName extracts queue name from a context, if any.
//
-// Return value qname indicates which queue the task was pulled from.
-func GetQueueName(ctx context.Context) (qname string, ok bool) {
-	metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
-	if !ok {
-		return "", false
-	}
-	return metadata.qname, true
+// Return value queue indicates which queue the task was pulled from.
+func GetQueueName(ctx context.Context) (queue string, ok bool) {
+	return asynqcontext.GetQueueName(ctx)
}
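The accessors now delegate to internal/context, but their public behavior is unchanged: the values are injected by the server before ProcessTask runs, so a bare context.Background() yields ok == false. A sketch of a handler reading them; the task type name is illustrative:

package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func handle(ctx context.Context, task *asynq.Task) error {
	if id, ok := asynq.GetTaskID(ctx); ok {
		log.Printf("processing task %s (%s)", id, task.Type())
	}
	if n, ok := asynq.GetRetryCount(ctx); ok {
		max, _ := asynq.GetMaxRetry(ctx)
		log.Printf("attempt %d of %d", n+1, max+1)
	}
	if queue, ok := asynq.GetQueueName(ctx); ok {
		log.Printf("pulled from queue %q", queue)
	}
	return nil
}

func main() {
	mux := asynq.NewServeMux()
	mux.HandleFunc("email:send", handle)
	// In a real program, pass mux to a server created with asynq.NewServer
	// and call its Run method.
	_ = mux
}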

example_test.go

@@ -5,6 +5,7 @@
package asynq_test

import (
+	"context"
	"fmt"
	"log"
	"os"
@@ -113,3 +114,20 @@ func ExampleParseRedisURI() {
	// localhost:6379
	// 10
}
func ExampleResultWriter() {
// ResultWriter is only accessible in Handler.
h := func(ctx context.Context, task *asynq.Task) error {
// .. do task processing work
res := []byte("task result data")
n, err := task.ResultWriter().Write(res) // implements io.Writer
if err != nil {
return fmt.Errorf("failed to write task result: %v", err)
}
log.Printf(" %d bytes written", n)
return nil
}
_ = h
}

forwarder.go

@@ -56,13 +56,15 @@ func (f *forwarder) start(wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
+		timer := time.NewTimer(f.avgInterval)
		for {
			select {
			case <-f.done:
				f.logger.Debug("Forwarder done")
				return
-			case <-time.After(f.avgInterval):
+			case <-timer.C:
				f.exec()
+				timer.Reset(f.avgInterval)
			}
		}
	}()
@@ -70,6 +72,6 @@ func (f *forwarder) start(wg *sync.WaitGroup) {

func (f *forwarder) exec() {
	if err := f.broker.ForwardIfReady(f.queues...); err != nil {
-		f.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
+		f.logger.Errorf("Failed to forward scheduled tasks: %v", err)
	}
}
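The change above swaps time.After, which allocates a fresh timer and channel on every loop iteration, for a single reused time.Timer. The same pattern in isolation, as a generic sketch not tied to the forwarder's types:

package main

import (
	"fmt"
	"time"
)

// poll runs work on a fixed interval, reusing one timer instead of calling
// time.After in each select iteration.
func poll(interval time.Duration, done <-chan struct{}, work func()) {
	timer := time.NewTimer(interval)
	defer timer.Stop()
	for {
		select {
		case <-done:
			return
		case <-timer.C:
			work()
			timer.Reset(interval) // safe: the channel was just drained
		}
	}
}

func main() {
	done := make(chan struct{})
	go poll(100*time.Millisecond, done, func() { fmt.Println("tick") })
	time.Sleep(350 * time.Millisecond)
	close(done)
}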

forwarder_test.go

@@ -10,9 +10,9 @@ import (
	"time"

	"github.com/google/go-cmp/cmp"
-	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
+	h "github.com/hibiken/asynq/internal/testutil"
)

func TestForwarder(t *testing.T) {

go.mod

@@ -1,17 +1,19 @@
module github.com/hibiken/asynq

-go 1.13
+go 1.14

require (
-	github.com/go-redis/redis/v7 v7.4.0
-	github.com/golang/protobuf v1.4.1
-	github.com/google/go-cmp v0.5.0
+	github.com/go-redis/redis/v8 v8.11.2
+	github.com/golang/protobuf v1.4.2
+	github.com/google/go-cmp v0.5.6
	github.com/google/uuid v1.2.0
+	github.com/kr/pretty v0.1.0 // indirect
	github.com/robfig/cron/v3 v3.0.1
	github.com/spf13/cast v1.3.1
+	github.com/stretchr/testify v1.6.1 // indirect
	go.uber.org/goleak v0.10.0
-	golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e
+	golang.org/x/sys v0.0.0-20210112080510-489259a85091
	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
	google.golang.org/protobuf v1.25.0
-	gopkg.in/yaml.v2 v2.2.7 // indirect
+	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
)
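The jump from go-redis v7 to v8 is why a context.Background() argument appears throughout the test changes above: in v8 every command takes a context as its first parameter. A minimal sketch against the v8 API, assuming a local Redis:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	// v8 signature: Set(ctx, key, value, expiration); v7 had no ctx parameter.
	if err := rdb.Set(context.Background(), "key", "value", 0).Err(); err != nil {
		fmt.Println("redis unavailable:", err)
	}
}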

go.sum

@@ -1,91 +1,129 @@
(checksum entries regenerated for the dependency changes in go.mod above: entries for go-redis/v8 and its transitive dependencies added, stale go-redis/v7-era entries removed)
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

heartbeat.go

@@ -12,6 +12,7 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/timeutil"
) )
// heartbeater is responsible for writing process info to redis periodically to // heartbeater is responsible for writing process info to redis periodically to
@@ -19,6 +20,7 @@ import (
type heartbeater struct { type heartbeater struct {
logger *log.Logger logger *log.Logger
broker base.Broker broker base.Broker
clock timeutil.Clock
// channel to communicate back to the long running "heartbeater" goroutine. // channel to communicate back to the long running "heartbeater" goroutine.
done chan struct{} done chan struct{}
@@ -41,7 +43,7 @@ type heartbeater struct {
workers map[string]*workerInfo workers map[string]*workerInfo
// state is shared with other goroutine but is concurrency safe. // state is shared with other goroutine but is concurrency safe.
state *base.ServerState state *serverState
// channels to receive updates on active workers. // channels to receive updates on active workers.
starting <-chan *workerInfo starting <-chan *workerInfo
@@ -55,7 +57,7 @@ type heartbeaterParams struct {
concurrency int concurrency int
queues map[string]int queues map[string]int
strictPriority bool strictPriority bool
state *base.ServerState state *serverState
starting <-chan *workerInfo starting <-chan *workerInfo
finished <-chan *base.TaskMessage finished <-chan *base.TaskMessage
} }
@@ -69,6 +71,7 @@ func newHeartbeater(params heartbeaterParams) *heartbeater {
return &heartbeater{ return &heartbeater{
logger: params.logger, logger: params.logger,
broker: params.broker, broker: params.broker,
clock: timeutil.NewRealClock(),
done: make(chan struct{}), done: make(chan struct{}),
interval: params.interval, interval: params.interval,
@@ -100,6 +103,8 @@ type workerInfo struct {
started time.Time started time.Time
// deadline the worker has to finish processing the task by. // deadline the worker has to finish processing the task by.
deadline time.Time deadline time.Time
// lease the worker holds for the task.
lease *base.Lease
} }
func (h *heartbeater) start(wg *sync.WaitGroup) { func (h *heartbeater) start(wg *sync.WaitGroup) {
@@ -107,7 +112,7 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
go func() { go func() {
defer wg.Done() defer wg.Done()
h.started = time.Now() h.started = h.clock.Now()
h.beat() h.beat()
@@ -125,16 +130,21 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
timer.Reset(h.interval) timer.Reset(h.interval)
case w := <-h.starting: case w := <-h.starting:
h.workers[w.msg.ID.String()] = w h.workers[w.msg.ID] = w
case msg := <-h.finished: case msg := <-h.finished:
delete(h.workers, msg.ID.String()) delete(h.workers, msg.ID)
} }
} }
}() }()
} }
// beat extends lease for workers and writes server/worker info to redis.
func (h *heartbeater) beat() { func (h *heartbeater) beat() {
h.state.mu.Lock()
srvStatus := h.state.value.String()
h.state.mu.Unlock()
info := base.ServerInfo{ info := base.ServerInfo{
Host: h.host, Host: h.host,
PID: h.pid, PID: h.pid,
@@ -142,12 +152,13 @@ func (h *heartbeater) beat() {
Concurrency: h.concurrency, Concurrency: h.concurrency,
Queues: h.queues, Queues: h.queues,
StrictPriority: h.strictPriority, StrictPriority: h.strictPriority,
Status: h.state.String(), Status: srvStatus,
Started: h.started, Started: h.started,
ActiveWorkerCount: len(h.workers), ActiveWorkerCount: len(h.workers),
} }
var ws []*base.WorkerInfo var ws []*base.WorkerInfo
idsByQueue := make(map[string][]string)
for id, w := range h.workers { for id, w := range h.workers {
ws = append(ws, &base.WorkerInfo{ ws = append(ws, &base.WorkerInfo{
Host: h.host, Host: h.host,
@@ -160,11 +171,30 @@ func (h *heartbeater) beat() {
Started: w.started, Started: w.started,
Deadline: w.deadline, Deadline: w.deadline,
}) })
// Check lease before adding to the set to make sure not to extend the lease if the lease is already expired.
if w.lease.IsValid() {
idsByQueue[w.msg.Queue] = append(idsByQueue[w.msg.Queue], id)
} else {
w.lease.NotifyExpiration() // notify processor if the lease is expired
}
} }
// Note: Set TTL to be long enough so that it won't expire before we write again // Note: Set TTL to be long enough so that it won't expire before we write again
// and short enough to expire quickly once the process is shut down or killed. // and short enough to expire quickly once the process is shut down or killed.
if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil { if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil {
h.logger.Errorf("could not write server state data: %v", err) h.logger.Errorf("Failed to write server state data: %v", err)
}
for qname, ids := range idsByQueue {
expirationTime, err := h.broker.ExtendLease(qname, ids...)
if err != nil {
h.logger.Errorf("Failed to extend lease for tasks %v: %v", ids, err)
continue
}
for _, id := range ids {
if l := h.workers[id].lease; !l.Reset(expirationTime) {
h.logger.Warnf("Lease reset failed for %s; lease deadline: %v", id, l.Deadline())
}
}
} }
} }
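The rewritten beat() batches lease extension per queue: worker IDs are grouped by queue, ExtendLease is called once per queue, and each lease is Reset to the expiration time the broker returns; leases that have already lapsed get NotifyExpiration instead so the processor can abandon the task. Below is a minimal, self-contained sketch of those lease mechanics; the toy type only mirrors the IsValid/Reset/NotifyExpiration/Done surface used here, not the real internal/base.Lease implementation.

package main

import (
    "fmt"
    "sync"
    "time"
)

// lease is a toy stand-in for asynq's internal base.Lease: a deadline that the
// heartbeater extends on every beat and that the processor watches for expiry.
type lease struct {
    mu       sync.Mutex
    deadline time.Time
    expireCh chan struct{} // closed once to signal expiration
    once     sync.Once
}

func newLease(d time.Time) *lease {
    return &lease{deadline: d, expireCh: make(chan struct{})}
}

// IsValid reports whether the deadline is still in the future.
func (l *lease) IsValid() bool {
    l.mu.Lock()
    defer l.mu.Unlock()
    return time.Now().Before(l.deadline)
}

// Reset extends the deadline; it reports false if the lease already expired,
// mirroring how beat() logs a warning when a reset fails.
func (l *lease) Reset(t time.Time) bool {
    l.mu.Lock()
    defer l.mu.Unlock()
    if time.Now().After(l.deadline) {
        return false
    }
    l.deadline = t
    return true
}

// NotifyExpiration unblocks anyone waiting on Done.
func (l *lease) NotifyExpiration() { l.once.Do(func() { close(l.expireCh) }) }

func (l *lease) Done() <-chan struct{} { return l.expireCh }

func main() {
    l := newLease(time.Now().Add(50 * time.Millisecond))
    go func() {
        <-l.Done()
        fmt.Println("lease expired; processor abandons the task")
    }()

    time.Sleep(100 * time.Millisecond) // let the lease lapse

    // Mirrors the beat() logic above: extend valid leases, notify expired ones.
    if l.IsValid() {
        l.Reset(time.Now().Add(30 * time.Second))
    } else {
        l.NotifyExpiration()
    }
    time.Sleep(10 * time.Millisecond) // give the watcher goroutine time to print
}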

heartbeat_test.go

@@ -5,31 +5,154 @@
package asynq package asynq
import ( import (
"context"
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts" "github.com/google/go-cmp/cmp/cmpopts"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
"github.com/hibiken/asynq/internal/testbroker" "github.com/hibiken/asynq/internal/testbroker"
h "github.com/hibiken/asynq/internal/testutil"
"github.com/hibiken/asynq/internal/timeutil"
) )
// Test goes through a few phases.
//
// Phase1: Simulate Server startup; Simulate starting tasks listed in startedWorkers
// Phase2: Simulate finishing tasks listed in finishedTasks
// Phase3: Simulate Server shutdown;
func TestHeartbeater(t *testing.T) { func TestHeartbeater(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close() defer r.Close()
rdbClient := rdb.NewRDB(r) rdbClient := rdb.NewRDB(r)
now := time.Now()
const elapsedTime = 10 * time.Second // simulated time elapsed between phase1 and phase2
clock := timeutil.NewSimulatedClock(time.Time{}) // time will be set in each test
t1 := h.NewTaskMessageWithQueue("task1", nil, "default")
t2 := h.NewTaskMessageWithQueue("task2", nil, "default")
t3 := h.NewTaskMessageWithQueue("task3", nil, "default")
t4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
t5 := h.NewTaskMessageWithQueue("task5", nil, "custom")
t6 := h.NewTaskMessageWithQueue("task6", nil, "default")
// Note: intentionally set to a time earlier than now.Add(rdb.LeaseDuration) to test that lease extension is working.
lease1 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease2 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease3 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease4 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease5 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
lease6 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
tests := []struct { tests := []struct {
interval time.Duration desc string
// Interval between heartbeats.
interval time.Duration
// Server info.
host string host string
pid int pid int
queues map[string]int queues map[string]int
concurrency int concurrency int
active map[string][]*base.TaskMessage // initial active set state
lease map[string][]base.Z // initial lease set state
wantLease1 map[string][]base.Z // expected lease set state after starting all startedWorkers
wantLease2 map[string][]base.Z // expected lease set state after finishing all finishedTasks
startedWorkers []*workerInfo // workerInfo to send via the started channel
finishedTasks []*base.TaskMessage // tasks to send via the finished channel
startTime time.Time // simulated start time
elapsedTime time.Duration // simulated time elapsed between starting and finishing processing tasks
}{ }{
{2 * time.Second, "localhost", 45678, map[string]int{"default": 1}, 10}, {
desc: "With single queue",
interval: 2 * time.Second,
host: "localhost",
pid: 45678,
queues: map[string]int{"default": 1},
concurrency: 10,
active: map[string][]*base.TaskMessage{
"default": {t1, t2, t3},
},
lease: map[string][]base.Z{
"default": {
{Message: t1, Score: now.Add(10 * time.Second).Unix()},
{Message: t2, Score: now.Add(10 * time.Second).Unix()},
{Message: t3, Score: now.Add(10 * time.Second).Unix()},
},
},
startedWorkers: []*workerInfo{
{msg: t1, started: now, deadline: now.Add(2 * time.Minute), lease: lease1},
{msg: t2, started: now, deadline: now.Add(2 * time.Minute), lease: lease2},
{msg: t3, started: now, deadline: now.Add(2 * time.Minute), lease: lease3},
},
finishedTasks: []*base.TaskMessage{t1, t2},
wantLease1: map[string][]base.Z{
"default": {
{Message: t1, Score: now.Add(rdb.LeaseDuration).Unix()},
{Message: t2, Score: now.Add(rdb.LeaseDuration).Unix()},
{Message: t3, Score: now.Add(rdb.LeaseDuration).Unix()},
},
},
wantLease2: map[string][]base.Z{
"default": {
{Message: t3, Score: now.Add(elapsedTime).Add(rdb.LeaseDuration).Unix()},
},
},
startTime: now,
elapsedTime: elapsedTime,
},
{
desc: "With multiple queue",
interval: 2 * time.Second,
host: "localhost",
pid: 45678,
queues: map[string]int{"default": 1, "custom": 2},
concurrency: 10,
active: map[string][]*base.TaskMessage{
"default": {t6},
"custom": {t4, t5},
},
lease: map[string][]base.Z{
"default": {
{Message: t6, Score: now.Add(10 * time.Second).Unix()},
},
"custom": {
{Message: t4, Score: now.Add(10 * time.Second).Unix()},
{Message: t5, Score: now.Add(10 * time.Second).Unix()},
},
},
startedWorkers: []*workerInfo{
{msg: t6, started: now, deadline: now.Add(2 * time.Minute), lease: lease6},
{msg: t4, started: now, deadline: now.Add(2 * time.Minute), lease: lease4},
{msg: t5, started: now, deadline: now.Add(2 * time.Minute), lease: lease5},
},
finishedTasks: []*base.TaskMessage{t6, t5},
wantLease1: map[string][]base.Z{
"default": {
{Message: t6, Score: now.Add(rdb.LeaseDuration).Unix()},
},
"custom": {
{Message: t4, Score: now.Add(rdb.LeaseDuration).Unix()},
{Message: t5, Score: now.Add(rdb.LeaseDuration).Unix()},
},
},
wantLease2: map[string][]base.Z{
"default": {},
"custom": {
{Message: t4, Score: now.Add(elapsedTime).Add(rdb.LeaseDuration).Unix()},
},
},
startTime: now,
elapsedTime: elapsedTime,
},
} }
timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond) timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond)
@@ -37,8 +160,15 @@ func TestHeartbeater(t *testing.T) {
ignoreFieldOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID") ignoreFieldOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID")
for _, tc := range tests { for _, tc := range tests {
h.FlushDB(t, r) h.FlushDB(t, r)
h.SeedAllActiveQueues(t, r, tc.active)
h.SeedAllLease(t, r, tc.lease)
state := base.NewServerState() clock.SetTime(tc.startTime)
rdbClient.SetClock(clock)
srvState := &serverState{}
startingCh := make(chan *workerInfo)
finishedCh := make(chan *base.TaskMessage)
hb := newHeartbeater(heartbeaterParams{ hb := newHeartbeater(heartbeaterParams{
logger: testLogger, logger: testLogger,
broker: rdbClient, broker: rdbClient,
@@ -46,72 +176,134 @@ func TestHeartbeater(t *testing.T) {
concurrency: tc.concurrency, concurrency: tc.concurrency,
queues: tc.queues, queues: tc.queues,
strictPriority: false, strictPriority: false,
state: state, state: srvState,
starting: make(chan *workerInfo), starting: startingCh,
finished: make(chan *base.TaskMessage), finished: finishedCh,
}) })
hb.clock = clock
// Change host and pid fields for testing purpose. // Change host and pid fields for testing purpose.
hb.host = tc.host hb.host = tc.host
hb.pid = tc.pid hb.pid = tc.pid
state.Set(base.StateActive) //===================
// Start Phase1
//===================
srvState.mu.Lock()
srvState.value = srvStateActive // simulating Server.Start
srvState.mu.Unlock()
var wg sync.WaitGroup var wg sync.WaitGroup
hb.start(&wg) hb.start(&wg)
want := &base.ServerInfo{ // Simulate processor starting to work on tasks.
Host: tc.host, for _, w := range tc.startedWorkers {
PID: tc.pid, startingCh <- w
Queues: tc.queues,
Concurrency: tc.concurrency,
Started: time.Now(),
Status: "active",
} }
// allow for heartbeater to write to redis // Wait for heartbeater to write to redis
time.Sleep(tc.interval) time.Sleep(tc.interval * 2)
ss, err := rdbClient.ListServers() ss, err := rdbClient.ListServers()
if err != nil { if err != nil {
t.Errorf("could not read server info from redis: %v", err) t.Errorf("%s: could not read server info from redis: %v", tc.desc, err)
hb.shutdown() hb.shutdown()
continue continue
} }
if len(ss) != 1 { if len(ss) != 1 {
t.Errorf("(*RDB).ListServers returned %d process info, want 1", len(ss)) t.Errorf("%s: (*RDB).ListServers returned %d server info, want 1", tc.desc, len(ss))
hb.shutdown() hb.shutdown()
continue continue
} }
if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" { wantInfo := &base.ServerInfo{
t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff) Host: tc.host,
PID: tc.pid,
Queues: tc.queues,
Concurrency: tc.concurrency,
Started: now,
Status: "active",
ActiveWorkerCount: len(tc.startedWorkers),
}
if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
t.Errorf("%s: redis stored server status %+v, want %+v; (-want, +got)\n%s", tc.desc, ss[0], wantInfo, diff)
hb.shutdown() hb.shutdown()
continue continue
} }
// status change for qname, wantLease := range tc.wantLease1 {
state.Set(base.StateClosed) gotLease := h.GetLeaseEntries(t, r, qname)
if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s: mismatch found in %q: (-want,+got):\n%s", tc.desc, base.LeaseKey(qname), diff)
}
}
// allow for heartbeater to write to redis for _, w := range tc.startedWorkers {
if want := now.Add(rdb.LeaseDuration); w.lease.Deadline() != want {
t.Errorf("%s: lease deadline for %v is set to %v, want %v", tc.desc, w.msg, w.lease.Deadline(), want)
}
}
//===================
// Start Phase2
//===================
clock.AdvanceTime(tc.elapsedTime)
// Simulate processor finished processing tasks.
for _, msg := range tc.finishedTasks {
if err := rdbClient.Done(context.Background(), msg); err != nil {
t.Fatalf("RDB.Done failed: %v", err)
}
finishedCh <- msg
}
// Wait for heartbeater to write to redis
time.Sleep(tc.interval * 2) time.Sleep(tc.interval * 2)
want.Status = "closed" for qname, wantLease := range tc.wantLease2 {
gotLease := h.GetLeaseEntries(t, r, qname)
if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s: mismatch found in %q: (-want,+got):\n%s", tc.desc, base.LeaseKey(qname), diff)
}
}
//===================
// Start Phase3
//===================
// Server state change; simulating Server.Shutdown
srvState.mu.Lock()
srvState.value = srvStateClosed
srvState.mu.Unlock()
// Wait for heartbeater to write to redis
time.Sleep(tc.interval * 2)
wantInfo = &base.ServerInfo{
Host: tc.host,
PID: tc.pid,
Queues: tc.queues,
Concurrency: tc.concurrency,
Started: now,
Status: "closed",
ActiveWorkerCount: len(tc.startedWorkers) - len(tc.finishedTasks),
}
ss, err = rdbClient.ListServers() ss, err = rdbClient.ListServers()
if err != nil { if err != nil {
t.Errorf("could not read process status from redis: %v", err) t.Errorf("%s: could not read server status from redis: %v", tc.desc, err)
hb.shutdown() hb.shutdown()
continue continue
} }
if len(ss) != 1 { if len(ss) != 1 {
t.Errorf("(*RDB).ListProcesses returned %d process info, want 1", len(ss)) t.Errorf("%s: (*RDB).ListServers returned %d server info, want 1", tc.desc, len(ss))
hb.shutdown() hb.shutdown()
continue continue
} }
if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" { if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff) t.Errorf("%s: redis stored process status %+v, want %+v; (-want, +got)\n%s", tc.desc, ss[0], wantInfo, diff)
hb.shutdown() hb.shutdown()
continue continue
} }
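The rewritten test injects a simulated clock instead of sleeping through real lease windows: SetTime pins the start of each phase and AdvanceTime jumps the simulated elapsed time between phases. A rough sketch of that pattern follows, with a toy clock standing in for timeutil.SimulatedClock.

package main

import (
    "fmt"
    "sync"
    "time"
)

// Clock abstracts time.Now so tests can substitute a fake, as
// internal/timeutil does for the heartbeater and RDB.
type Clock interface{ Now() time.Time }

// SimulatedClock is a toy stand-in for timeutil.SimulatedClock.
type SimulatedClock struct {
    mu sync.Mutex
    t  time.Time
}

func (c *SimulatedClock) Now() time.Time { c.mu.Lock(); defer c.mu.Unlock(); return c.t }

func (c *SimulatedClock) SetTime(t time.Time) { c.mu.Lock(); c.t = t; c.mu.Unlock() }

func (c *SimulatedClock) AdvanceTime(d time.Duration) { c.mu.Lock(); c.t = c.t.Add(d); c.mu.Unlock() }

func main() {
    now := time.Now()
    clock := &SimulatedClock{t: now}
    leaseDeadline := now.Add(30 * time.Second)

    // Phase 1: the lease is valid at the simulated start time.
    fmt.Println("valid:", clock.Now().Before(leaseDeadline)) // true

    // Phase 2: jump 10 simulated seconds without sleeping.
    clock.AdvanceTime(10 * time.Second)
    fmt.Println("valid:", clock.Now().Before(leaseDeadline)) // still true

    // Phase 3: jump past the deadline to exercise the expiry path.
    clock.AdvanceTime(30 * time.Second)
    fmt.Println("valid:", clock.Now().Before(leaseDeadline)) // false
}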
@@ -131,8 +323,7 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
r := rdb.NewRDB(setup(t)) r := rdb.NewRDB(setup(t))
defer r.Close() defer r.Close()
testBroker := testbroker.NewTestBroker(r) testBroker := testbroker.NewTestBroker(r)
state := base.NewServerState() state := &serverState{value: srvStateActive}
state.Set(base.StateActive)
hb := newHeartbeater(heartbeaterParams{ hb := newHeartbeater(heartbeaterParams{
logger: testLogger, logger: testLogger,
broker: testBroker, broker: testBroker,

View File

@@ -10,8 +10,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors" "github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
@@ -44,18 +43,50 @@ func (i *Inspector) Queues() ([]string, error) {
return i.rdb.AllQueues() return i.rdb.AllQueues()
} }
// QueueInfo represents a state of queues at a certain time. // Groups returns a list of all groups within the given queue.
func (i *Inspector) Groups(queue string) ([]*GroupInfo, error) {
stats, err := i.rdb.GroupStats(queue)
if err != nil {
return nil, err
}
var res []*GroupInfo
for _, s := range stats {
res = append(res, &GroupInfo{
Group: s.Group,
Size: s.Size,
})
}
return res, nil
}
// GroupInfo represents a state of a group at a certain time.
type GroupInfo struct {
// Name of the group.
Group string
// Size is the total number of tasks in the group.
Size int
}
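A usage sketch for the new Groups API; the redis address and queue name are placeholders, not defaults mandated by asynq.

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"}) // example address
    defer insp.Close()

    groups, err := insp.Groups("default") // example queue name
    if err != nil {
        log.Fatal(err)
    }
    for _, g := range groups {
        fmt.Printf("group=%s size=%d\n", g.Group, g.Size)
    }
}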
// QueueInfo represents a state of a queue at a certain time.
type QueueInfo struct { type QueueInfo struct {
// Name of the queue. // Name of the queue.
Queue string Queue string
// Total number of bytes that the queue and its tasks require to be stored in redis. // Total number of bytes that the queue and its tasks require to be stored in redis.
// It is an approximate memory usage value in bytes since the value is computed by sampling.
MemoryUsage int64 MemoryUsage int64
// Latency of the queue, measured by the oldest pending task in the queue.
Latency time.Duration
// Size is the total number of tasks in the queue. // Size is the total number of tasks in the queue.
// The value is the sum of Pending, Active, Scheduled, Retry, and Archived. // The value is the sum of Pending, Active, Scheduled, Retry, Aggregating and Archived.
Size int Size int
// Groups is the total number of groups in the queue.
Groups int
// Number of pending tasks. // Number of pending tasks.
Pending int Pending int
// Number of active tasks. // Number of active tasks.
@@ -66,13 +97,22 @@ type QueueInfo struct {
Retry int Retry int
// Number of archived tasks. // Number of archived tasks.
Archived int Archived int
// Number of stored completed tasks.
Completed int
// Number of aggregating tasks.
Aggregating int
// Total number of tasks being processed during the given date. // Total number of tasks being processed within the given date (counter resets daily).
// The number includes both succeeded and failed tasks. // The number includes both succeeded and failed tasks.
Processed int Processed int
// Total number of tasks failed to be processed during the given date. // Total number of tasks failed to be processed within the given date (counter resets daily).
Failed int Failed int
// Total number of tasks processed (cumulative).
ProcessedTotal int
// Total number of tasks failed (cumulative).
FailedTotal int
// Paused indicates whether the queue is paused. // Paused indicates whether the queue is paused.
// If true, tasks in the queue will not be processed. // If true, tasks in the queue will not be processed.
Paused bool Paused bool
@@ -82,27 +122,33 @@ type QueueInfo struct {
} }
// GetQueueInfo returns current information of the given queue. // GetQueueInfo returns current information of the given queue.
func (i *Inspector) GetQueueInfo(qname string) (*QueueInfo, error) { func (i *Inspector) GetQueueInfo(queue string) (*QueueInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return nil, err return nil, err
} }
stats, err := i.rdb.CurrentStats(qname) stats, err := i.rdb.CurrentStats(queue)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &QueueInfo{ return &QueueInfo{
Queue: stats.Queue, Queue: stats.Queue,
MemoryUsage: stats.MemoryUsage, MemoryUsage: stats.MemoryUsage,
Size: stats.Size, Latency: stats.Latency,
Pending: stats.Pending, Size: stats.Size,
Active: stats.Active, Groups: stats.Groups,
Scheduled: stats.Scheduled, Pending: stats.Pending,
Retry: stats.Retry, Active: stats.Active,
Archived: stats.Archived, Scheduled: stats.Scheduled,
Processed: stats.Processed, Retry: stats.Retry,
Failed: stats.Failed, Archived: stats.Archived,
Paused: stats.Paused, Completed: stats.Completed,
Timestamp: stats.Timestamp, Aggregating: stats.Aggregating,
Processed: stats.Processed,
Failed: stats.Failed,
ProcessedTotal: stats.ProcessedTotal,
FailedTotal: stats.FailedTotal,
Paused: stats.Paused,
Timestamp: stats.Timestamp,
}, nil }, nil
} }
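A sketch of reading the new QueueInfo fields; the address and queue name are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"}) // example address
    defer insp.Close()

    info, err := insp.GetQueueInfo("default")
    if err != nil {
        log.Fatal(err)
    }
    // Latency, Groups, Aggregating, and the cumulative counters are the
    // fields introduced by this change.
    fmt.Printf("size=%d latency=%v groups=%d aggregating=%d\n",
        info.Size, info.Latency, info.Groups, info.Aggregating)
    fmt.Printf("processed: today=%d total=%d, failed: today=%d total=%d\n",
        info.Processed, info.ProcessedTotal, info.Failed, info.FailedTotal)
}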
@@ -120,11 +166,11 @@ type DailyStats struct {
} }
// History returns a list of stats from the last n days. // History returns a list of stats from the last n days.
func (i *Inspector) History(qname string, n int) ([]*DailyStats, error) { func (i *Inspector) History(queue string, n int) ([]*DailyStats, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return nil, err return nil, err
} }
stats, err := i.rdb.HistoricalStats(qname, n) stats, err := i.rdb.HistoricalStats(queue, n)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -161,27 +207,23 @@ var (
// If the specified queue does not exist, DeleteQueue returns ErrQueueNotFound. // If the specified queue does not exist, DeleteQueue returns ErrQueueNotFound.
// If force is set to false and the specified queue is not empty, DeleteQueue // If force is set to false and the specified queue is not empty, DeleteQueue
// returns ErrQueueNotEmpty. // returns ErrQueueNotEmpty.
func (i *Inspector) DeleteQueue(qname string, force bool) error { func (i *Inspector) DeleteQueue(queue string, force bool) error {
err := i.rdb.RemoveQueue(qname, force) err := i.rdb.RemoveQueue(queue, force)
if errors.IsQueueNotFound(err) { if errors.IsQueueNotFound(err) {
return fmt.Errorf("%w: queue=%q", ErrQueueNotFound, qname) return fmt.Errorf("%w: queue=%q", ErrQueueNotFound, queue)
} }
if errors.IsQueueNotEmpty(err) { if errors.IsQueueNotEmpty(err) {
return fmt.Errorf("%w: queue=%q", ErrQueueNotEmpty, qname) return fmt.Errorf("%w: queue=%q", ErrQueueNotEmpty, queue)
} }
return err return err
} }
// GetTaskInfo retrieves task information given a task id and queue name. // GetTaskInfo retrieves task information given a task id and queue name.
// //
// Returns ErrQueueNotFound if a queue with the given name doesn't exist. // Returns an error wrapping ErrQueueNotFound if a queue with the given name doesn't exist.
// Returns ErrTaskNotFound if a task with the given id doesn't exist in the queue. // Returns an error wrapping ErrTaskNotFound if a task with the given id doesn't exist in the queue.
func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) { func (i *Inspector) GetTaskInfo(queue, id string) (*TaskInfo, error) {
taskid, err := uuid.Parse(id) info, err := i.rdb.GetTaskInfo(queue, id)
if err != nil {
return nil, fmt.Errorf("asynq: %s is not a valid task id", id)
}
info, err := i.rdb.GetTaskInfo(qname, taskid)
switch { switch {
case errors.IsQueueNotFound(err): case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound) return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -190,7 +232,7 @@ func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) {
case err != nil: case err != nil:
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
return newTaskInfo(info.Message, info.State, info.NextProcessAt), nil return newTaskInfo(info.Message, info.State, info.NextProcessAt, info.Result), nil
} }
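Since task IDs are now passed through as opaque strings, the ID returned by Client.Enqueue can be fed straight back into the Inspector. A sketch, with the address and task type as placeholders:

package main

import (
    "errors"
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"}) // example address
    defer client.Close()

    info, err := client.Enqueue(asynq.NewTask("email:welcome", nil))
    if err != nil {
        log.Fatal(err)
    }

    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
    defer insp.Close()

    // The ID is used as-is; there is no uuid.Parse round trip anymore.
    got, err := insp.GetTaskInfo("default", info.ID)
    switch {
    case errors.Is(err, asynq.ErrTaskNotFound):
        fmt.Println("task no longer exists")
    case err != nil:
        log.Fatal(err)
    default:
        fmt.Printf("state=%v queue=%s\n", got.State, got.Queue)
    }
}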
// ListOption specifies behavior of list operation. // ListOption specifies behavior of list operation.
@@ -257,23 +299,27 @@ func Page(n int) ListOption {
// ListPendingTasks retrieves pending tasks from the specified queue. // ListPendingTasks retrieves pending tasks from the specified queue.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) { func (i *Inspector) ListPendingTasks(queue string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
msgs, err := i.rdb.ListPending(qname, pgn) infos, err := i.rdb.ListPending(queue, pgn)
switch { switch {
case errors.IsQueueNotFound(err): case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound) return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil: case err != nil:
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
now := time.Now()
var tasks []*TaskInfo var tasks []*TaskInfo
for _, m := range msgs { for _, i := range infos {
tasks = append(tasks, newTaskInfo(m, base.TaskStatePending, now)) tasks = append(tasks, newTaskInfo(
i.Message,
i.State,
i.NextProcessAt,
i.Result,
))
} }
return tasks, err return tasks, err
} }
@@ -281,13 +327,53 @@ func (i *Inspector) ListPendingTasks(qname string, opts ...ListOption) ([]*TaskI
// ListActiveTasks retrieves active tasks from the specified queue. // ListActiveTasks retrieves active tasks from the specified queue.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) { func (i *Inspector) ListActiveTasks(queue string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
msgs, err := i.rdb.ListActive(qname, pgn) infos, err := i.rdb.ListActive(queue, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
}
expired, err := i.rdb.ListLeaseExpired(time.Now(), queue)
if err != nil {
return nil, fmt.Errorf("asynq: %v", err)
}
expiredSet := make(map[string]struct{}) // set of expired message IDs
for _, msg := range expired {
expiredSet[msg.ID] = struct{}{}
}
var tasks []*TaskInfo
for _, i := range infos {
t := newTaskInfo(
i.Message,
i.State,
i.NextProcessAt,
i.Result,
)
if _, ok := expiredSet[i.Message.ID]; ok {
t.IsOrphaned = true
}
tasks = append(tasks, t)
}
return tasks, nil
}
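A sketch of using the new IsOrphaned flag to spot active tasks whose worker lease has expired; address and queue name are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"}) // example address
    defer insp.Close()

    tasks, err := insp.ListActiveTasks("default")
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range tasks {
        // IsOrphaned is the new field: the task is in the active set but no
        // worker holds a live lease on it.
        if t.IsOrphaned {
            fmt.Printf("orphaned: id=%s type=%s\n", t.ID, t.Type)
        }
    }
}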
// ListAggregatingTasks retrieves aggregating tasks from the specified group.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListAggregatingTasks(queue, group string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(queue); err != nil {
return nil, fmt.Errorf("asynq: %v", err)
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
infos, err := i.rdb.ListAggregating(queue, group, pgn)
switch { switch {
case errors.IsQueueNotFound(err): case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound) return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -295,23 +381,28 @@ func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskIn
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
var tasks []*TaskInfo var tasks []*TaskInfo
for _, m := range msgs { for _, i := range infos {
tasks = append(tasks, newTaskInfo(m, base.TaskStateActive, time.Time{})) tasks = append(tasks, newTaskInfo(
i.Message,
i.State,
i.NextProcessAt,
i.Result,
))
} }
return tasks, err return tasks, nil
} }
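A usage sketch with illustrative queue/group names, combined with the existing pagination options:

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"}) // example address
    defer insp.Close()

    // Page through the "notifications" group of the "default" queue
    // (names are illustrative), ten tasks per page.
    tasks, err := insp.ListAggregatingTasks("default", "notifications",
        asynq.PageSize(10), asynq.Page(1))
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range tasks {
        fmt.Printf("id=%s group=%s\n", t.ID, t.Group)
    }
}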
// ListScheduledTasks retrieves scheduled tasks from the specified queue. // ListScheduledTasks retrieves scheduled tasks from the specified queue.
// Tasks are sorted by NextProcessAt in ascending order. // Tasks are sorted by NextProcessAt in ascending order.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) { func (i *Inspector) ListScheduledTasks(queue string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListScheduled(qname, pgn) infos, err := i.rdb.ListScheduled(queue, pgn)
switch { switch {
case errors.IsQueueNotFound(err): case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound) return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -319,11 +410,12 @@ func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*Tas
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
var tasks []*TaskInfo var tasks []*TaskInfo
for _, z := range zs { for _, i := range infos {
tasks = append(tasks, newTaskInfo( tasks = append(tasks, newTaskInfo(
z.Message, i.Message,
base.TaskStateScheduled, i.State,
time.Unix(z.Score, 0), i.NextProcessAt,
i.Result,
)) ))
} }
return tasks, nil return tasks, nil
@@ -333,13 +425,13 @@ func (i *Inspector) ListScheduledTasks(qname string, opts ...ListOption) ([]*Tas
// Tasks are sorted by NextProcessAt in ascending order. // Tasks are sorted by NextProcessAt in ascending order.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) { func (i *Inspector) ListRetryTasks(queue string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListRetry(qname, pgn) infos, err := i.rdb.ListRetry(queue, pgn)
switch { switch {
case errors.IsQueueNotFound(err): case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound) return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -347,11 +439,12 @@ func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInf
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
var tasks []*TaskInfo var tasks []*TaskInfo
for _, z := range zs { for _, i := range infos {
tasks = append(tasks, newTaskInfo( tasks = append(tasks, newTaskInfo(
z.Message, i.Message,
base.TaskStateRetry, i.State,
time.Unix(z.Score, 0), i.NextProcessAt,
i.Result,
)) ))
} }
return tasks, nil return tasks, nil
@@ -361,13 +454,13 @@ func (i *Inspector) ListRetryTasks(qname string, opts ...ListOption) ([]*TaskInf
// Tasks are sorted by LastFailedAt in descending order. // Tasks are sorted by LastFailedAt in descending order.
// //
// By default, it retrieves the first 30 tasks. // By default, it retrieves the first 30 tasks.
func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*TaskInfo, error) { func (i *Inspector) ListArchivedTasks(queue string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
opt := composeListOptions(opts...) opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1} pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
zs, err := i.rdb.ListArchived(qname, pgn) infos, err := i.rdb.ListArchived(queue, pgn)
switch { switch {
case errors.IsQueueNotFound(err): case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound) return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -375,11 +468,41 @@ func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*Task
return nil, fmt.Errorf("asynq: %v", err) return nil, fmt.Errorf("asynq: %v", err)
} }
var tasks []*TaskInfo var tasks []*TaskInfo
for _, z := range zs { for _, i := range infos {
tasks = append(tasks, newTaskInfo( tasks = append(tasks, newTaskInfo(
z.Message, i.Message,
base.TaskStateArchived, i.State,
time.Time{}, i.NextProcessAt,
i.Result,
))
}
return tasks, nil
}
// ListCompletedTasks retrieves completed tasks from the specified queue.
// Tasks are sorted by expiration time (i.e. CompletedAt + Retention) in descending order.
//
// By default, it retrieves the first 30 tasks.
func (i *Inspector) ListCompletedTasks(queue string, opts ...ListOption) ([]*TaskInfo, error) {
if err := base.ValidateQueueName(queue); err != nil {
return nil, fmt.Errorf("asynq: %v", err)
}
opt := composeListOptions(opts...)
pgn := rdb.Pagination{Size: opt.pageSize, Page: opt.pageNum - 1}
infos, err := i.rdb.ListCompleted(queue, pgn)
switch {
case errors.IsQueueNotFound(err):
return nil, fmt.Errorf("asynq: %w", ErrQueueNotFound)
case err != nil:
return nil, fmt.Errorf("asynq: %v", err)
}
var tasks []*TaskInfo
for _, i := range infos {
tasks = append(tasks, newTaskInfo(
i.Message,
i.State,
i.NextProcessAt,
i.Result,
)) ))
} }
return tasks, nil return tasks, nil
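A usage sketch (placeholders as before); note that completed tasks are retained only if they were enqueued with the Retention option, otherwise they are removed on completion.

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"}) // example address
    defer insp.Close()

    tasks, err := insp.ListCompletedTasks("default")
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range tasks {
        // CompletedAt and Result are populated for tasks in the completed state.
        fmt.Printf("id=%s completed_at=%v result=%q\n", t.ID, t.CompletedAt, t.Result)
    }
}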
@@ -387,41 +510,61 @@ func (i *Inspector) ListArchivedTasks(qname string, opts ...ListOption) ([]*Task
// DeleteAllPendingTasks deletes all pending tasks from the specified queue, // DeleteAllPendingTasks deletes all pending tasks from the specified queue,
// and reports the number of tasks deleted. // and reports the number of tasks deleted.
func (i *Inspector) DeleteAllPendingTasks(qname string) (int, error) { func (i *Inspector) DeleteAllPendingTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.DeleteAllPendingTasks(qname) n, err := i.rdb.DeleteAllPendingTasks(queue)
return int(n), err return int(n), err
} }
// DeleteAllScheduledTasks deletes all scheduled tasks from the specified queue, // DeleteAllScheduledTasks deletes all scheduled tasks from the specified queue,
// and reports the number of tasks deleted. // and reports the number of tasks deleted.
func (i *Inspector) DeleteAllScheduledTasks(qname string) (int, error) { func (i *Inspector) DeleteAllScheduledTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.DeleteAllScheduledTasks(qname) n, err := i.rdb.DeleteAllScheduledTasks(queue)
return int(n), err return int(n), err
} }
// DeleteAllRetryTasks deletes all retry tasks from the specified queue, // DeleteAllRetryTasks deletes all retry tasks from the specified queue,
// and reports the number of tasks deleted. // and reports the number of tasks deleted.
func (i *Inspector) DeleteAllRetryTasks(qname string) (int, error) { func (i *Inspector) DeleteAllRetryTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.DeleteAllRetryTasks(qname) n, err := i.rdb.DeleteAllRetryTasks(queue)
return int(n), err return int(n), err
} }
// DeleteAllArchivedTasks deletes all archived tasks from the specified queue, // DeleteAllArchivedTasks deletes all archived tasks from the specified queue,
// and reports the number of tasks deleted. // and reports the number of tasks deleted.
func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) { func (i *Inspector) DeleteAllArchivedTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.DeleteAllArchivedTasks(qname) n, err := i.rdb.DeleteAllArchivedTasks(queue)
return int(n), err
}
// DeleteAllCompletedTasks deletes all completed tasks from the specified queue,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllCompletedTasks(queue string) (int, error) {
if err := base.ValidateQueueName(queue); err != nil {
return 0, err
}
n, err := i.rdb.DeleteAllCompletedTasks(queue)
return int(n), err
}
// DeleteAllAggregatingTasks deletes all tasks from the specified group,
// and reports the number of tasks deleted.
func (i *Inspector) DeleteAllAggregatingTasks(queue, group string) (int, error) {
if err := base.ValidateQueueName(queue); err != nil {
return 0, err
}
n, err := i.rdb.DeleteAllAggregatingTasks(queue, group)
return int(n), err return int(n), err
} }
@@ -429,18 +572,14 @@ func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
// The task needs to be in pending, scheduled, retry, or archived state, // The task needs to be in pending, scheduled, retry, or archived state,
// otherwise DeleteTask will return an error. // otherwise DeleteTask will return an error.
// //
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound. // If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound. // If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is in active state, it returns a non-nil error. // If the task is in active state, it returns a non-nil error.
func (i *Inspector) DeleteTask(qname, id string) error { func (i *Inspector) DeleteTask(queue, id string) error {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return fmt.Errorf("asynq: %v", err) return fmt.Errorf("asynq: %v", err)
} }
taskid, err := uuid.Parse(id) err := i.rdb.DeleteTask(queue, id)
if err != nil {
return fmt.Errorf("asynq: %s is not a valid task id", id)
}
err = i.rdb.DeleteTask(qname, taskid)
switch { switch {
case errors.IsQueueNotFound(err): case errors.IsQueueNotFound(err):
return fmt.Errorf("asynq: %w", ErrQueueNotFound) return fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -453,33 +592,43 @@ func (i *Inspector) DeleteTask(qname, id string) error {
} }
// RunAllScheduledTasks transition all scheduled tasks to pending state from the given queue, // RunAllScheduledTasks schedules all scheduled tasks from the given queue to run,
// and reports the number of tasks transitioned. // and reports the number of tasks scheduled to run.
func (i *Inspector) RunAllScheduledTasks(qname string) (int, error) { func (i *Inspector) RunAllScheduledTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.RunAllScheduledTasks(qname) n, err := i.rdb.RunAllScheduledTasks(queue)
return int(n), err return int(n), err
} }
// RunAllRetryTasks transition all retry tasks to pending state from the given queue, // RunAllRetryTasks schedules all retry tasks from the given queue to run,
// and reports the number of tasks transitioned. // and reports the number of tasks scheduled to run.
func (i *Inspector) RunAllRetryTasks(qname string) (int, error) { func (i *Inspector) RunAllRetryTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.RunAllRetryTasks(qname) n, err := i.rdb.RunAllRetryTasks(queue)
return int(n), err return int(n), err
} }
// RunAllArchivedTasks transition all archived tasks to pending state from the given queue, // RunAllArchivedTasks schedules all archived tasks from the given queue to run,
// and reports the number of tasks transitioned. // and reports the number of tasks scheduled to run.
func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) { func (i *Inspector) RunAllArchivedTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.RunAllArchivedTasks(qname) n, err := i.rdb.RunAllArchivedTasks(queue)
return int(n), err
}
// RunAllAggregatingTasks schedules all tasks from the given group to run,
// and reports the number of tasks scheduled to run.
func (i *Inspector) RunAllAggregatingTasks(queue, group string) (int, error) {
if err := base.ValidateQueueName(queue); err != nil {
return 0, err
}
n, err := i.rdb.RunAllAggregatingTasks(queue, group)
return int(n), err return int(n), err
} }
@@ -487,18 +636,14 @@ func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
// The task needs to be in scheduled, retry, or archived state, otherwise RunTask // The task needs to be in scheduled, retry, or archived state, otherwise RunTask
// will return an error. // will return an error.
// //
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound. // If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound. // If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is in pending or active state, it returns a non-nil error. // If the task is in pending or active state, it returns a non-nil error.
func (i *Inspector) RunTask(qname, id string) error { func (i *Inspector) RunTask(queue, id string) error {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return fmt.Errorf("asynq: %v", err) return fmt.Errorf("asynq: %v", err)
} }
taskid, err := uuid.Parse(id) err := i.rdb.RunTask(queue, id)
if err != nil {
return fmt.Errorf("asynq: %s is not a valid task id", id)
}
err = i.rdb.RunTask(qname, taskid)
switch { switch {
case errors.IsQueueNotFound(err): case errors.IsQueueNotFound(err):
return fmt.Errorf("asynq: %w", ErrQueueNotFound) return fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -512,31 +657,41 @@ func (i *Inspector) RunTask(qname, id string) error {
// ArchiveAllPendingTasks archives all pending tasks from the given queue, // ArchiveAllPendingTasks archives all pending tasks from the given queue,
// and reports the number of tasks archived. // and reports the number of tasks archived.
func (i *Inspector) ArchiveAllPendingTasks(qname string) (int, error) { func (i *Inspector) ArchiveAllPendingTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.ArchiveAllPendingTasks(qname) n, err := i.rdb.ArchiveAllPendingTasks(queue)
return int(n), err return int(n), err
} }
// ArchiveAllScheduledTasks archives all scheduled tasks from the given queue, // ArchiveAllScheduledTasks archives all scheduled tasks from the given queue,
// and reports the number of tasks archived. // and reports the number of tasks archived.
func (i *Inspector) ArchiveAllScheduledTasks(qname string) (int, error) { func (i *Inspector) ArchiveAllScheduledTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.ArchiveAllScheduledTasks(qname) n, err := i.rdb.ArchiveAllScheduledTasks(queue)
return int(n), err return int(n), err
} }
// ArchiveAllRetryTasks archives all retry tasks from the given queue, // ArchiveAllRetryTasks archives all retry tasks from the given queue,
// and reports the number of tasks archived. // and reports the number of tasks archived.
func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) { func (i *Inspector) ArchiveAllRetryTasks(queue string) (int, error) {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return 0, err return 0, err
} }
n, err := i.rdb.ArchiveAllRetryTasks(qname) n, err := i.rdb.ArchiveAllRetryTasks(queue)
return int(n), err
}
// ArchiveAllAggregatingTasks archives all tasks from the given group,
// and reports the number of tasks archived.
func (i *Inspector) ArchiveAllAggregatingTasks(queue, group string) (int, error) {
if err := base.ValidateQueueName(queue); err != nil {
return 0, err
}
n, err := i.rdb.ArchiveAllAggregatingTasks(queue, group)
return int(n), err return int(n), err
} }
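The three new batch actions on a group compose as follows; the names are illustrative, and each call reports how many tasks it touched.

package main

import (
    "fmt"
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"}) // example address
    defer insp.Close()

    const queue, group = "default", "notifications" // illustrative names

    // Force the whole group to run now instead of waiting for aggregation...
    n, err := insp.RunAllAggregatingTasks(queue, group)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d tasks moved to pending\n", n)

    // ...or archive / delete the group wholesale.
    if n, err = insp.ArchiveAllAggregatingTasks(queue, group); err == nil {
        fmt.Printf("%d tasks archived\n", n)
    }
    if n, err = insp.DeleteAllAggregatingTasks(queue, group); err == nil {
        fmt.Printf("%d tasks deleted\n", n)
    }
}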
@@ -544,18 +699,14 @@ func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
// The task needs to be in pending, scheduled, or retry state, otherwise ArchiveTask // The task needs to be in pending, scheduled, or retry state, otherwise ArchiveTask
// will return an error. // will return an error.
// //
// If a queue with the given name doesn't exist, it returns ErrQueueNotFound. // If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound. // If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
// If the task is already archived, it returns a non-nil error. // If the task is already archived, it returns a non-nil error.
func (i *Inspector) ArchiveTask(qname, id string) error { func (i *Inspector) ArchiveTask(queue, id string) error {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return fmt.Errorf("asynq: err") return fmt.Errorf("asynq: err")
} }
taskid, err := uuid.Parse(id) err := i.rdb.ArchiveTask(queue, id)
if err != nil {
return fmt.Errorf("asynq: %s is not a valid task id", id)
}
err = i.rdb.ArchiveTask(qname, taskid)
switch { switch {
case errors.IsQueueNotFound(err): case errors.IsQueueNotFound(err):
return fmt.Errorf("asynq: %w", ErrQueueNotFound) return fmt.Errorf("asynq: %w", ErrQueueNotFound)
@@ -577,20 +728,20 @@ func (i *Inspector) CancelProcessing(id string) error {
// PauseQueue pauses task processing on the specified queue. // PauseQueue pauses task processing on the specified queue.
// If the queue is already paused, it will return a non-nil error. // If the queue is already paused, it will return a non-nil error.
func (i *Inspector) PauseQueue(qname string) error { func (i *Inspector) PauseQueue(queue string) error {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return err return err
} }
return i.rdb.Pause(qname) return i.rdb.Pause(queue)
} }
// UnpauseQueue resumes task processing on the specified queue. // UnpauseQueue resumes task processing on the specified queue.
// If the queue is not paused, it will return a non-nil error. // If the queue is not paused, it will return a non-nil error.
func (i *Inspector) UnpauseQueue(qname string) error { func (i *Inspector) UnpauseQueue(queue string) error {
if err := base.ValidateQueueName(qname); err != nil { if err := base.ValidateQueueName(queue); err != nil {
return err return err
} }
return i.rdb.Unpause(qname) return i.rdb.Unpause(queue)
} }
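A short pause/resume sketch; both calls return a non-nil error if the queue is already in the requested state.

package main

import (
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    insp := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"}) // example address
    defer insp.Close()

    // PauseQueue errors if the queue is already paused;
    // UnpauseQueue likewise errors if the queue is not paused.
    if err := insp.PauseQueue("default"); err != nil {
        log.Printf("pause: %v", err)
    }
    if err := insp.UnpauseQueue("default"); err != nil {
        log.Printf("unpause: %v", err)
    }
}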
// Servers returns a list of running servers' information. // Servers returns a list of running servers' information.
@@ -680,8 +831,8 @@ type WorkerInfo struct {
} }
// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to. // ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
func (i *Inspector) ClusterKeySlot(qname string) (int64, error) { func (i *Inspector) ClusterKeySlot(queue string) (int64, error) {
return i.rdb.ClusterKeySlot(qname) return i.rdb.ClusterKeySlot(queue)
} }
// ClusterNode describes a node in redis cluster. // ClusterNode describes a node in redis cluster.
@@ -696,8 +847,8 @@ type ClusterNode struct {
// ClusterNodes returns a list of nodes the given queue belongs to. // ClusterNodes returns a list of nodes the given queue belongs to.
// //
// Only relevant if task queues are stored in redis cluster. // Only relevant if task queues are stored in redis cluster.
func (i *Inspector) ClusterNodes(qname string) ([]*ClusterNode, error) { func (i *Inspector) ClusterNodes(queue string) ([]*ClusterNode, error) {
nodes, err := i.rdb.ClusterNodes(qname) nodes, err := i.rdb.ClusterNodes(queue)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -765,11 +916,11 @@ func parseOption(s string) (Option, error) {
fn, arg := parseOptionFunc(s), parseOptionArg(s) fn, arg := parseOptionFunc(s), parseOptionArg(s)
switch fn { switch fn {
case "Queue": case "Queue":
qname, err := strconv.Unquote(arg) queue, err := strconv.Unquote(arg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return Queue(qname), nil return Queue(queue), nil
case "MaxRetry": case "MaxRetry":
n, err := strconv.Atoi(arg) n, err := strconv.Atoi(arg)
if err != nil { if err != nil {
@@ -806,6 +957,12 @@ func parseOption(s string) (Option, error) {
return nil, err return nil, err
} }
return ProcessIn(d), nil return ProcessIn(d), nil
case "Retention":
d, err := time.ParseDuration(arg)
if err != nil {
return nil, err
}
return Retention(d), nil
default: default:
return nil, fmt.Errorf("cannot not parse option string %q", s) return nil, fmt.Errorf("cannot not parse option string %q", s)
} }
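parseOption is what lets the CLI and inspector reconstruct options from their stored string form; the new Retention case mirrors the existing ProcessIn handling. On the enqueue side, the option it parses back looks like this (address and task type are placeholders):

package main

import (
    "log"
    "time"

    "github.com/hibiken/asynq"
)

func main() {
    client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"}) // example address
    defer client.Close()

    // Keep the task around for 24h after completion so it shows up in
    // ListCompletedTasks; the option is stored in string form and parsed
    // back by the new Retention case in parseOption.
    info, err := client.Enqueue(
        asynq.NewTask("report:generate", nil),
        asynq.Retention(24*time.Hour),
    )
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("enqueued %s", info.ID)
}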

inspector_test.go

@@ -5,19 +5,21 @@
package asynq package asynq
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"math"
"sort" "sort"
"testing" "testing"
"time" "time"
"github.com/go-redis/redis/v8"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts" "github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/uuid" "github.com/google/uuid"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
h "github.com/hibiken/asynq/internal/testutil"
"github.com/hibiken/asynq/internal/timeutil"
) )
func TestInspectorQueues(t *testing.T) { func TestInspectorQueues(t *testing.T) {
@@ -37,7 +39,7 @@ func TestInspectorQueues(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
h.FlushDB(t, r) h.FlushDB(t, r)
for _, qname := range tc.queues { for _, qname := range tc.queues {
if err := r.SAdd(base.AllQueues, qname).Err(); err != nil { if err := r.SAdd(context.Background(), base.AllQueues, qname).Err(); err != nil {
t.Fatalf("could not initialize all queue set: %v", err) t.Fatalf("could not initialize all queue set: %v", err)
} }
} }
@@ -136,7 +138,7 @@ func TestInspectorDeleteQueue(t *testing.T) {
tc.qname, tc.force, err) tc.qname, tc.force, err)
continue continue
} }
if r.SIsMember(base.AllQueues, tc.qname).Val() { if r.SIsMember(context.Background(), base.AllQueues, tc.qname).Val() {
t.Errorf("%q is a member of %q", tc.qname, base.AllQueues) t.Errorf("%q is a member of %q", tc.qname, base.AllQueues)
} }
} }
@@ -268,17 +270,22 @@ func TestInspectorGetQueueInfo(t *testing.T) {
ignoreMemUsg := cmpopts.IgnoreFields(QueueInfo{}, "MemoryUsage") ignoreMemUsg := cmpopts.IgnoreFields(QueueInfo{}, "MemoryUsage")
inspector := NewInspector(getRedisConnOpt(t)) inspector := NewInspector(getRedisConnOpt(t))
inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
tests := []struct { tests := []struct {
pending map[string][]*base.TaskMessage pending map[string][]*base.TaskMessage
active map[string][]*base.TaskMessage active map[string][]*base.TaskMessage
scheduled map[string][]base.Z scheduled map[string][]base.Z
retry map[string][]base.Z retry map[string][]base.Z
archived map[string][]base.Z archived map[string][]base.Z
processed map[string]int completed map[string][]base.Z
failed map[string]int processed map[string]int
qname string failed map[string]int
want *QueueInfo processedTotal map[string]int
failedTotal map[string]int
oldestPendingMessageEnqueueTime map[string]time.Time
qname string
want *QueueInfo
}{ }{
{ {
pending: map[string][]*base.TaskMessage{ pending: map[string][]*base.TaskMessage{
@@ -309,6 +316,11 @@ func TestInspectorGetQueueInfo(t *testing.T) {
"critical": {}, "critical": {},
"low": {}, "low": {},
}, },
completed: map[string][]base.Z{
"default": {},
"critical": {},
"low": {},
},
processed: map[string]int{ processed: map[string]int{
"default": 120, "default": 120,
"critical": 100, "critical": 100,
@@ -319,19 +331,38 @@ func TestInspectorGetQueueInfo(t *testing.T) {
"critical": 0, "critical": 0,
"low": 5, "low": 5,
}, },
processedTotal: map[string]int{
"default": 11111,
"critical": 22222,
"low": 33333,
},
failedTotal: map[string]int{
"default": 111,
"critical": 222,
"low": 333,
},
oldestPendingMessageEnqueueTime: map[string]time.Time{
"default": now.Add(-15 * time.Second),
"critical": now.Add(-200 * time.Millisecond),
"low": now.Add(-30 * time.Second),
},
qname: "default", qname: "default",
want: &QueueInfo{ want: &QueueInfo{
Queue: "default", Queue: "default",
Size: 4, Latency: 15 * time.Second,
Pending: 1, Size: 4,
Active: 1, Pending: 1,
Scheduled: 2, Active: 1,
Retry: 0, Scheduled: 2,
Archived: 0, Retry: 0,
Processed: 120, Archived: 0,
Failed: 2, Completed: 0,
Paused: false, Processed: 120,
Timestamp: now, Failed: 2,
ProcessedTotal: 11111,
FailedTotal: 111,
Paused: false,
Timestamp: now,
}, },
}, },
} }
@@ -343,13 +374,26 @@ func TestInspectorGetQueueInfo(t *testing.T) {
h.SeedAllScheduledQueues(t, r, tc.scheduled) h.SeedAllScheduledQueues(t, r, tc.scheduled)
h.SeedAllRetryQueues(t, r, tc.retry) h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived) h.SeedAllArchivedQueues(t, r, tc.archived)
h.SeedAllCompletedQueues(t, r, tc.completed)
ctx := context.Background()
for qname, n := range tc.processed { for qname, n := range tc.processed {
processedKey := base.ProcessedKey(qname, now) r.Set(ctx, base.ProcessedKey(qname, now), n, 0)
r.Set(processedKey, n, 0)
} }
for qname, n := range tc.failed { for qname, n := range tc.failed {
failedKey := base.FailedKey(qname, now) r.Set(ctx, base.FailedKey(qname, now), n, 0)
r.Set(failedKey, n, 0) }
for qname, n := range tc.processedTotal {
r.Set(ctx, base.ProcessedTotalKey(qname), n, 0)
}
for qname, n := range tc.failedTotal {
r.Set(ctx, base.FailedTotalKey(qname), n, 0)
}
for qname, enqueueTime := range tc.oldestPendingMessageEnqueueTime {
if enqueueTime.IsZero() {
continue
}
oldestPendingMessageID := r.LRange(ctx, base.PendingKey(qname), -1, -1).Val()[0] // get the rightmost msg in the list
r.HSet(ctx, base.TaskKey(qname, oldestPendingMessageID), "pending_since", enqueueTime.UnixNano())
} }
got, err := inspector.GetQueueInfo(tc.qname) got, err := inspector.GetQueueInfo(tc.qname)
@@ -385,14 +429,14 @@ func TestInspectorHistory(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
h.FlushDB(t, r) h.FlushDB(t, r)
r.SAdd(base.AllQueues, tc.qname) r.SAdd(context.Background(), base.AllQueues, tc.qname)
// populate last n days data // populate last n days data
for i := 0; i < tc.n; i++ { for i := 0; i < tc.n; i++ {
ts := now.Add(-time.Duration(i) * 24 * time.Hour) ts := now.Add(-time.Duration(i) * 24 * time.Hour)
processedKey := base.ProcessedKey(tc.qname, ts) processedKey := base.ProcessedKey(tc.qname, ts)
failedKey := base.FailedKey(tc.qname, ts) failedKey := base.FailedKey(tc.qname, ts)
r.Set(processedKey, (i+1)*1000, 0) r.Set(context.Background(), processedKey, (i+1)*1000, 0)
r.Set(failedKey, (i+1)*10, 0) r.Set(context.Background(), failedKey, (i+1)*10, 0)
} }
got, err := inspector.History(tc.qname, tc.n) got, err := inspector.History(tc.qname, tc.n)
@@ -423,7 +467,7 @@ func TestInspectorHistory(t *testing.T) {
} }
func createPendingTask(msg *base.TaskMessage) *TaskInfo { func createPendingTask(msg *base.TaskMessage) *TaskInfo {
return newTaskInfo(msg, base.TaskStatePending, time.Now()) return newTaskInfo(msg, base.TaskStatePending, time.Now(), nil)
} }
func TestInspectorGetTaskInfo(t *testing.T) { func TestInspectorGetTaskInfo(t *testing.T) {
@@ -483,47 +527,52 @@ func TestInspectorGetTaskInfo(t *testing.T) {
}{ }{
{ {
qname: "default", qname: "default",
id: m1.ID.String(), id: m1.ID,
want: newTaskInfo( want: newTaskInfo(
m1, m1,
base.TaskStateActive, base.TaskStateActive,
time.Time{}, // zero value for n/a time.Time{}, // zero value for n/a
nil,
), ),
}, },
{ {
qname: "default", qname: "default",
id: m2.ID.String(), id: m2.ID,
want: newTaskInfo( want: newTaskInfo(
m2, m2,
base.TaskStateScheduled, base.TaskStateScheduled,
fiveMinsFromNow, fiveMinsFromNow,
nil,
), ),
}, },
{ {
qname: "custom", qname: "custom",
id: m3.ID.String(), id: m3.ID,
want: newTaskInfo( want: newTaskInfo(
m3, m3,
base.TaskStateRetry, base.TaskStateRetry,
oneHourFromNow, oneHourFromNow,
nil,
), ),
}, },
{ {
qname: "custom", qname: "custom",
id: m4.ID.String(), id: m4.ID,
want: newTaskInfo( want: newTaskInfo(
m4, m4,
base.TaskStateArchived, base.TaskStateArchived,
time.Time{}, // zero value for n/a time.Time{}, // zero value for n/a
nil,
), ),
}, },
{ {
qname: "custom", qname: "custom",
id: m5.ID.String(), id: m5.ID,
want: newTaskInfo( want: newTaskInfo(
m5, m5,
base.TaskStatePending, base.TaskStatePending,
now, now,
nil,
), ),
}, },
} }
@@ -602,7 +651,7 @@ func TestInspectorGetTaskInfoError(t *testing.T) {
}{ }{
{ {
qname: "nonexistent", qname: "nonexistent",
id: m1.ID.String(), id: m1.ID,
wantErr: ErrQueueNotFound, wantErr: ErrQueueNotFound,
}, },
{ {
@@ -697,6 +746,12 @@ func TestInspectorListPendingTasks(t *testing.T) {
} }
} }
func newOrphanedTaskInfo(msg *base.TaskMessage) *TaskInfo {
info := newTaskInfo(msg, base.TaskStateActive, time.Time{}, nil)
info.IsOrphaned = true
return info
}
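For orientation, a minimal sketch (ours, not asynq's implementation) of the predicate the fixtures below encode: an active task counts as orphaned once its lease entry's score, a Unix timestamp, is at or before the current time. This is why m2's lease score of now minus 10 seconds marks it as orphaned.

package main

import (
	"fmt"
	"time"
)

// isOrphaned is an illustrative predicate: an active task is orphaned when
// its lease score (expiration in Unix seconds) is not in the future.
func isOrphaned(leaseExpireAt int64, now time.Time) bool {
	return leaseExpireAt <= now.Unix()
}

func main() {
	now := time.Now()
	fmt.Println(isOrphaned(now.Add(20*time.Second).Unix(), now))  // false: lease still held
	fmt.Println(isOrphaned(now.Add(-10*time.Second).Unix(), now)) // true: like m2 below
}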
func TestInspectorListActiveTasks(t *testing.T) { func TestInspectorListActiveTasks(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close() defer r.Close()
@@ -706,10 +761,12 @@ func TestInspectorListActiveTasks(t *testing.T) {
m4 := h.NewTaskMessageWithQueue("task4", nil, "custom") m4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
inspector := NewInspector(getRedisConnOpt(t)) inspector := NewInspector(getRedisConnOpt(t))
now := time.Now()
tests := []struct { tests := []struct {
desc string desc string
active map[string][]*base.TaskMessage active map[string][]*base.TaskMessage
lease map[string][]base.Z
qname string qname string
want []*TaskInfo want []*TaskInfo
}{ }{
@@ -719,10 +776,42 @@ func TestInspectorListActiveTasks(t *testing.T) {
"default": {m1, m2}, "default": {m1, m2},
"custom": {m3, m4}, "custom": {m3, m4},
}, },
lease: map[string][]base.Z{
"default": {
{Message: m1, Score: now.Add(20 * time.Second).Unix()},
{Message: m2, Score: now.Add(20 * time.Second).Unix()},
},
"custom": {
{Message: m3, Score: now.Add(20 * time.Second).Unix()},
{Message: m4, Score: now.Add(20 * time.Second).Unix()},
},
},
qname: "custom",
want: []*TaskInfo{
newTaskInfo(m3, base.TaskStateActive, time.Time{}, nil),
newTaskInfo(m4, base.TaskStateActive, time.Time{}, nil),
},
},
{
desc: "with an orphaned task",
active: map[string][]*base.TaskMessage{
"default": {m1, m2},
"custom": {m3, m4},
},
lease: map[string][]base.Z{
"default": {
{Message: m1, Score: now.Add(20 * time.Second).Unix()},
{Message: m2, Score: now.Add(-10 * time.Second).Unix()}, // orphaned task
},
"custom": {
{Message: m3, Score: now.Add(20 * time.Second).Unix()},
{Message: m4, Score: now.Add(20 * time.Second).Unix()},
},
},
qname: "default", qname: "default",
want: []*TaskInfo{ want: []*TaskInfo{
newTaskInfo(m1, base.TaskStateActive, time.Time{}), newTaskInfo(m1, base.TaskStateActive, time.Time{}, nil),
newTaskInfo(m2, base.TaskStateActive, time.Time{}), newOrphanedTaskInfo(m2),
}, },
}, },
} }
@@ -730,6 +819,7 @@ func TestInspectorListActiveTasks(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
h.FlushDB(t, r) h.FlushDB(t, r)
h.SeedAllActiveQueues(t, r, tc.active) h.SeedAllActiveQueues(t, r, tc.active)
h.SeedAllLease(t, r, tc.lease)
got, err := inspector.ListActiveTasks(tc.qname) got, err := inspector.ListActiveTasks(tc.qname)
if err != nil { if err != nil {
@@ -748,6 +838,7 @@ func createScheduledTask(z base.Z) *TaskInfo {
z.Message, z.Message,
base.TaskStateScheduled, base.TaskStateScheduled,
time.Unix(z.Score, 0), time.Unix(z.Score, 0),
nil,
) )
} }
@@ -817,6 +908,7 @@ func createRetryTask(z base.Z) *TaskInfo {
z.Message, z.Message,
base.TaskStateRetry, base.TaskStateRetry,
time.Unix(z.Score, 0), time.Unix(z.Score, 0),
nil,
) )
} }
@@ -887,6 +979,7 @@ func createArchivedTask(z base.Z) *TaskInfo {
z.Message, z.Message,
base.TaskStateArchived, base.TaskStateArchived,
time.Time{}, // zero value for n/a time.Time{}, // zero value for n/a
nil,
) )
} }
@@ -951,6 +1044,183 @@ func TestInspectorListArchivedTasks(t *testing.T) {
} }
} }
func newCompletedTaskMessage(typename, qname string, retention time.Duration, completedAt time.Time) *base.TaskMessage {
msg := h.NewTaskMessageWithQueue(typename, nil, qname)
msg.Retention = int64(retention.Seconds())
msg.CompletedAt = completedAt.Unix()
return msg
}
func createCompletedTask(z base.Z) *TaskInfo {
return newTaskInfo(
z.Message,
base.TaskStateCompleted,
time.Time{}, // zero value for n/a
nil, // TODO: Test with result data
)
}
func TestInspectorListCompletedTasks(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
m1 := newCompletedTaskMessage("task1", "default", 1*time.Hour, now.Add(-3*time.Minute)) // Expires in 57 mins
m2 := newCompletedTaskMessage("task2", "default", 30*time.Minute, now.Add(-10*time.Minute)) // Expires in 20 mins
m3 := newCompletedTaskMessage("task3", "default", 2*time.Hour, now.Add(-30*time.Minute)) // Expires in 90 mins
m4 := newCompletedTaskMessage("task4", "custom", 15*time.Minute, now.Add(-2*time.Minute)) // Expires in 13 mins
z1 := base.Z{Message: m1, Score: m1.CompletedAt + m1.Retention}
z2 := base.Z{Message: m2, Score: m2.CompletedAt + m2.Retention}
z3 := base.Z{Message: m3, Score: m3.CompletedAt + m3.Retention}
z4 := base.Z{Message: m4, Score: m4.CompletedAt + m4.Retention}
inspector := NewInspector(getRedisConnOpt(t))
tests := []struct {
desc string
completed map[string][]base.Z
qname string
want []*TaskInfo
}{
{
desc: "with a few completed tasks",
completed: map[string][]base.Z{
"default": {z1, z2, z3},
"custom": {z4},
},
qname: "default",
// Should be sorted by expiration time (CompletedAt + Retention).
want: []*TaskInfo{
createCompletedTask(z2),
createCompletedTask(z1),
createCompletedTask(z3),
},
},
{
desc: "with empty completed queue",
completed: map[string][]base.Z{
"default": {},
},
qname: "default",
want: []*TaskInfo(nil),
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)
got, err := inspector.ListCompletedTasks(tc.qname)
if err != nil {
t.Errorf("%s; ListCompletedTasks(%q) returned error: %v", tc.desc, tc.qname, err)
continue
}
if diff := cmp.Diff(tc.want, got, cmp.AllowUnexported(TaskInfo{})); diff != "" {
t.Errorf("%s; ListCompletedTasks(%q) = %v, want %v; (-want,+got)\n%s",
tc.desc, tc.qname, got, tc.want, diff)
}
}
}
func TestInspectorListAggregatingTasks(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
m1 := h.NewTaskMessageBuilder().SetType("task1").SetQueue("default").SetGroup("group1").Build()
m2 := h.NewTaskMessageBuilder().SetType("task2").SetQueue("default").SetGroup("group1").Build()
m3 := h.NewTaskMessageBuilder().SetType("task3").SetQueue("default").SetGroup("group1").Build()
m4 := h.NewTaskMessageBuilder().SetType("task4").SetQueue("default").SetGroup("group2").Build()
m5 := h.NewTaskMessageBuilder().SetType("task5").SetQueue("custom").SetGroup("group1").Build()
inspector := NewInspector(getRedisConnOpt(t))
fxt := struct {
tasks []*h.TaskSeedData
allQueues []string
allGroups map[string][]string
groups map[string][]*redis.Z
}{
tasks: []*h.TaskSeedData{
{Msg: m1, State: base.TaskStateAggregating},
{Msg: m2, State: base.TaskStateAggregating},
{Msg: m3, State: base.TaskStateAggregating},
{Msg: m4, State: base.TaskStateAggregating},
{Msg: m5, State: base.TaskStateAggregating},
},
allQueues: []string{"default", "custom"},
allGroups: map[string][]string{
base.AllGroups("default"): {"group1", "group2"},
base.AllGroups("custom"): {"group1"},
},
groups: map[string][]*redis.Z{
base.GroupKey("default", "group1"): {
{Member: m1.ID, Score: float64(now.Add(-30 * time.Second).Unix())},
{Member: m2.ID, Score: float64(now.Add(-20 * time.Second).Unix())},
{Member: m3.ID, Score: float64(now.Add(-10 * time.Second).Unix())},
},
base.GroupKey("default", "group2"): {
{Member: m4.ID, Score: float64(now.Add(-30 * time.Second).Unix())},
},
base.GroupKey("custom", "group1"): {
{Member: m5.ID, Score: float64(now.Add(-30 * time.Second).Unix())},
},
},
}
tests := []struct {
desc string
qname string
gname string
want []*TaskInfo
}{
{
desc: "default queue group1",
qname: "default",
gname: "group1",
want: []*TaskInfo{
createAggregatingTaskInfo(m1),
createAggregatingTaskInfo(m2),
createAggregatingTaskInfo(m3),
},
},
{
desc: "custom queue group1",
qname: "custom",
gname: "group1",
want: []*TaskInfo{
createAggregatingTaskInfo(m5),
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedTasks(t, r, fxt.tasks)
h.SeedRedisSet(t, r, base.AllQueues, fxt.allQueues)
h.SeedRedisSets(t, r, fxt.allGroups)
h.SeedRedisZSets(t, r, fxt.groups)
t.Run(tc.desc, func(t *testing.T) {
got, err := inspector.ListAggregatingTasks(tc.qname, tc.gname)
if err != nil {
t.Fatalf("ListAggregatingTasks returned error: %v", err)
}
cmpOpts := []cmp.Option{
cmpopts.EquateApproxTime(2 * time.Second),
cmp.AllowUnexported(TaskInfo{}),
}
if diff := cmp.Diff(tc.want, got, cmpOpts...); diff != "" {
t.Errorf("ListAggregatingTasks = %v, want = %v; (-want,+got)\n%s", got, tc.want, diff)
}
})
}
}
func createAggregatingTaskInfo(msg *base.TaskMessage) *TaskInfo {
return newTaskInfo(msg, base.TaskStateAggregating, time.Time{}, nil)
}
func TestInspectorListPagination(t *testing.T) { func TestInspectorListPagination(t *testing.T) {
// Create 100 tasks. // Create 100 tasks.
var msgs []*base.TaskMessage var msgs []*base.TaskMessage
@@ -1049,6 +1319,12 @@ func TestInspectorListTasksQueueNotFoundError(t *testing.T) {
if _, err := inspector.ListArchivedTasks(tc.qname); !errors.Is(err, tc.wantErr) { if _, err := inspector.ListArchivedTasks(tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListArchivedTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr) t.Errorf("ListArchivedTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
} }
if _, err := inspector.ListCompletedTasks(tc.qname); !errors.Is(err, tc.wantErr) {
t.Errorf("ListCompletedTasks(%q) returned error %v, want %v", tc.qname, err, tc.wantErr)
}
if _, err := inspector.ListAggregatingTasks(tc.qname, "mygroup"); !errors.Is(err, tc.wantErr) {
t.Errorf("ListAggregatingTasks(%q, \"mygroup\") returned error %v, want %v", tc.qname, err, tc.wantErr)
}
} }
} }
@@ -1314,6 +1590,72 @@ func TestInspectorDeleteAllArchivedTasks(t *testing.T) {
} }
} }
func TestInspectorDeleteAllCompletedTasks(t *testing.T) {
r := setup(t)
defer r.Close()
now := time.Now()
m1 := newCompletedTaskMessage("task1", "default", 30*time.Minute, now.Add(-2*time.Minute))
m2 := newCompletedTaskMessage("task2", "default", 30*time.Minute, now.Add(-5*time.Minute))
m3 := newCompletedTaskMessage("task3", "default", 30*time.Minute, now.Add(-10*time.Minute))
m4 := newCompletedTaskMessage("task4", "custom", 30*time.Minute, now.Add(-3*time.Minute))
z1 := base.Z{Message: m1, Score: m1.CompletedAt + m1.Retention}
z2 := base.Z{Message: m2, Score: m2.CompletedAt + m2.Retention}
z3 := base.Z{Message: m3, Score: m3.CompletedAt + m3.Retention}
z4 := base.Z{Message: m4, Score: m4.CompletedAt + m4.Retention}
inspector := NewInspector(getRedisConnOpt(t))
tests := []struct {
completed map[string][]base.Z
qname string
want int
wantCompleted map[string][]base.Z
}{
{
completed: map[string][]base.Z{
"default": {z1, z2, z3},
"custom": {z4},
},
qname: "default",
want: 3,
wantCompleted: map[string][]base.Z{
"default": {},
"custom": {z4},
},
},
{
completed: map[string][]base.Z{
"default": {},
},
qname: "default",
want: 0,
wantCompleted: map[string][]base.Z{
"default": {},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)
got, err := inspector.DeleteAllCompletedTasks(tc.qname)
if err != nil {
t.Errorf("DeleteAllCompletedTasks(%q) returned error: %v", tc.qname, err)
continue
}
if got != tc.want {
t.Errorf("DeleteAllCompletedTasks(%q) = %d, want %d", tc.qname, got, tc.want)
}
for qname, want := range tc.wantCompleted {
gotCompleted := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, gotCompleted, h.SortZSetEntryOpt); diff != "" {
t.Errorf("unexpected completed tasks in queue %q: (-want, +got)\n%s", qname, diff)
}
}
}
}
func TestInspectorArchiveAllPendingTasks(t *testing.T) { func TestInspectorArchiveAllPendingTasks(t *testing.T) {
r := setup(t) r := setup(t)
defer r.Close() defer r.Close()
@@ -1325,6 +1667,7 @@ func TestInspectorArchiveAllPendingTasks(t *testing.T) {
z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()} z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()}
z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()} z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()}
inspector := NewInspector(getRedisConnOpt(t)) inspector := NewInspector(getRedisConnOpt(t))
inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
tests := []struct { tests := []struct {
pending map[string][]*base.TaskMessage pending map[string][]*base.TaskMessage
@@ -1416,12 +1759,8 @@ func TestInspectorArchiveAllPendingTasks(t *testing.T) {
} }
} }
for qname, want := range tc.wantArchived { for qname, want := range tc.wantArchived {
// Allow Z.Score to differ by up to 2.
approxOpt := cmp.Comparer(func(a, b int64) bool {
return math.Abs(float64(a-b)) < 2
})
gotArchived := h.GetArchivedEntries(t, r, qname) gotArchived := h.GetArchivedEntries(t, r, qname)
if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt, approxOpt); diff != "" { if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt); diff != "" {
t.Errorf("unexpected archived tasks in queue %q: (-want, +got)\n%s", qname, diff) t.Errorf("unexpected archived tasks in queue %q: (-want, +got)\n%s", qname, diff)
} }
} }
@@ -1442,6 +1781,7 @@ func TestInspectorArchiveAllScheduledTasks(t *testing.T) {
z4 := base.Z{Message: m4, Score: now.Add(2 * time.Minute).Unix()} z4 := base.Z{Message: m4, Score: now.Add(2 * time.Minute).Unix()}
inspector := NewInspector(getRedisConnOpt(t)) inspector := NewInspector(getRedisConnOpt(t))
inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
tests := []struct { tests := []struct {
scheduled map[string][]base.Z scheduled map[string][]base.Z
@@ -1549,12 +1889,8 @@ func TestInspectorArchiveAllScheduledTasks(t *testing.T) {
} }
} }
for qname, want := range tc.wantArchived { for qname, want := range tc.wantArchived {
// Allow Z.Score to differ by up to 2.
approxOpt := cmp.Comparer(func(a, b int64) bool {
return math.Abs(float64(a-b)) < 2
})
gotArchived := h.GetArchivedEntries(t, r, qname) gotArchived := h.GetArchivedEntries(t, r, qname)
if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt, approxOpt); diff != "" { if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt); diff != "" {
t.Errorf("unexpected archived tasks in queue %q: (-want, +got)\n%s", qname, diff) t.Errorf("unexpected archived tasks in queue %q: (-want, +got)\n%s", qname, diff)
} }
} }
@@ -1575,6 +1911,7 @@ func TestInspectorArchiveAllRetryTasks(t *testing.T) {
z4 := base.Z{Message: m4, Score: now.Add(2 * time.Minute).Unix()} z4 := base.Z{Message: m4, Score: now.Add(2 * time.Minute).Unix()}
inspector := NewInspector(getRedisConnOpt(t)) inspector := NewInspector(getRedisConnOpt(t))
inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
tests := []struct { tests := []struct {
retry map[string][]base.Z retry map[string][]base.Z
@@ -1665,10 +2002,9 @@ func TestInspectorArchiveAllRetryTasks(t *testing.T) {
t.Errorf("unexpected retry tasks in queue %q: (-want, +got)\n%s", qname, diff) t.Errorf("unexpected retry tasks in queue %q: (-want, +got)\n%s", qname, diff)
} }
} }
cmpOpt := h.EquateInt64Approx(2) // allow for 2 seconds difference in Z.Score
for qname, want := range tc.wantArchived { for qname, want := range tc.wantArchived {
wantArchived := h.GetArchivedEntries(t, r, qname) wantArchived := h.GetArchivedEntries(t, r, qname)
if diff := cmp.Diff(want, wantArchived, h.SortZSetEntryOpt, cmpOpt); diff != "" { if diff := cmp.Diff(want, wantArchived, h.SortZSetEntryOpt); diff != "" {
t.Errorf("unexpected archived tasks in queue %q: (-want, +got)\n%s", qname, diff) t.Errorf("unexpected archived tasks in queue %q: (-want, +got)\n%s", qname, diff)
} }
} }
@@ -2616,8 +2952,9 @@ func TestInspectorArchiveTaskArchivesPendingTask(t *testing.T) {
m1 := h.NewTaskMessage("task1", nil) m1 := h.NewTaskMessage("task1", nil)
m2 := h.NewTaskMessageWithQueue("task2", nil, "custom") m2 := h.NewTaskMessageWithQueue("task2", nil, "custom")
m3 := h.NewTaskMessageWithQueue("task3", nil, "custom") m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
inspector := NewInspector(getRedisConnOpt(t))
now := time.Now() now := time.Now()
inspector := NewInspector(getRedisConnOpt(t))
inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
tests := []struct { tests := []struct {
pending map[string][]*base.TaskMessage pending map[string][]*base.TaskMessage
@@ -2712,6 +3049,7 @@ func TestInspectorArchiveTaskArchivesScheduledTask(t *testing.T) {
z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()}
inspector := NewInspector(getRedisConnOpt(t)) inspector := NewInspector(getRedisConnOpt(t))
inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
tests := []struct { tests := []struct {
scheduled map[string][]base.Z scheduled map[string][]base.Z
@@ -2788,6 +3126,7 @@ func TestInspectorArchiveTaskArchivesRetryTask(t *testing.T) {
z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()}
inspector := NewInspector(getRedisConnOpt(t)) inspector := NewInspector(getRedisConnOpt(t))
inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
tests := []struct { tests := []struct {
retry map[string][]base.Z retry map[string][]base.Z
@@ -2862,6 +3201,7 @@ func TestInspectorArchiveTaskError(t *testing.T) {
z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()} z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()}
inspector := NewInspector(getRedisConnOpt(t)) inspector := NewInspector(getRedisConnOpt(t))
inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
tests := []struct { tests := []struct {
retry map[string][]base.Z retry map[string][]base.Z
@@ -3033,6 +3373,7 @@ func TestParseOption(t *testing.T) {
{`Unique(1h)`, UniqueOpt, 1 * time.Hour}, {`Unique(1h)`, UniqueOpt, 1 * time.Hour},
{ProcessAt(oneHourFromNow).String(), ProcessAtOpt, oneHourFromNow}, {ProcessAt(oneHourFromNow).String(), ProcessAtOpt, oneHourFromNow},
{`ProcessIn(10m)`, ProcessInOpt, 10 * time.Minute}, {`ProcessIn(10m)`, ProcessInOpt, 10 * time.Minute},
{`Retention(24h)`, RetentionOpt, 24 * time.Hour},
} }
for _, tc := range tests { for _, tc := range tests {
@@ -3064,7 +3405,7 @@ func TestParseOption(t *testing.T) {
if gotVal != tc.wantVal.(int) { if gotVal != tc.wantVal.(int) {
t.Fatalf("got value %v, want %v", gotVal, tc.wantVal) t.Fatalf("got value %v, want %v", gotVal, tc.wantVal)
} }
case TimeoutOpt, UniqueOpt, ProcessInOpt: case TimeoutOpt, UniqueOpt, ProcessInOpt, RetentionOpt:
gotVal, ok := got.Value().(time.Duration) gotVal, ok := got.Value().(time.Duration)
if !ok { if !ok {
t.Fatal("returned Option with non duration value") t.Fatal("returned Option with non duration value")
@@ -3086,3 +3427,99 @@ func TestParseOption(t *testing.T) {
}) })
} }
} }
func TestInspectorGroups(t *testing.T) {
r := setup(t)
defer r.Close()
inspector := NewInspector(getRedisConnOpt(t))
m1 := h.NewTaskMessageBuilder().SetGroup("group1").Build()
m2 := h.NewTaskMessageBuilder().SetGroup("group1").Build()
m3 := h.NewTaskMessageBuilder().SetGroup("group1").Build()
m4 := h.NewTaskMessageBuilder().SetGroup("group2").Build()
m5 := h.NewTaskMessageBuilder().SetQueue("custom").SetGroup("group1").Build()
m6 := h.NewTaskMessageBuilder().SetQueue("custom").SetGroup("group1").Build()
now := time.Now()
fixtures := struct {
tasks []*h.TaskSeedData
allGroups map[string][]string
groups map[string][]*redis.Z
}{
tasks: []*h.TaskSeedData{
{Msg: m1, State: base.TaskStateAggregating},
{Msg: m2, State: base.TaskStateAggregating},
{Msg: m3, State: base.TaskStateAggregating},
{Msg: m4, State: base.TaskStateAggregating},
{Msg: m5, State: base.TaskStateAggregating},
},
allGroups: map[string][]string{
base.AllGroups("default"): {"group1", "group2"},
base.AllGroups("custom"): {"group1"},
},
groups: map[string][]*redis.Z{
base.GroupKey("default", "group1"): {
{Member: m1.ID, Score: float64(now.Add(-10 * time.Second).Unix())},
{Member: m2.ID, Score: float64(now.Add(-20 * time.Second).Unix())},
{Member: m3.ID, Score: float64(now.Add(-30 * time.Second).Unix())},
},
base.GroupKey("default", "group2"): {
{Member: m4.ID, Score: float64(now.Add(-20 * time.Second).Unix())},
},
base.GroupKey("custom", "group1"): {
{Member: m5.ID, Score: float64(now.Add(-10 * time.Second).Unix())},
{Member: m6.ID, Score: float64(now.Add(-20 * time.Second).Unix())},
},
},
}
tests := []struct {
desc string
qname string
want []*GroupInfo
}{
{
desc: "default queue groups",
qname: "default",
want: []*GroupInfo{
{Group: "group1", Size: 3},
{Group: "group2", Size: 1},
},
},
{
desc: "custom queue groups",
qname: "custom",
want: []*GroupInfo{
{Group: "group1", Size: 2},
},
},
}
var sortGroupInfosOpt = cmp.Transformer(
"SortGroupInfos",
func(in []*GroupInfo) []*GroupInfo {
out := append([]*GroupInfo(nil), in...)
sort.Slice(out, func(i, j int) bool {
return out[i].Group < out[j].Group
})
return out
})
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedTasks(t, r, fixtures.tasks)
h.SeedRedisSets(t, r, fixtures.allGroups)
h.SeedRedisZSets(t, r, fixtures.groups)
t.Run(tc.desc, func(t *testing.T) {
got, err := inspector.Groups(tc.qname)
if err != nil {
t.Fatalf("Groups returned error: %v", err)
}
if diff := cmp.Diff(tc.want, got, sortGroupInfosOpt); diff != "" {
t.Errorf("Groups = %v, want %v; (-want,+got)\n%s", got, tc.want, diff)
}
})
}
}

View File

@@ -10,19 +10,20 @@ import (
"crypto/md5" "crypto/md5"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"strings"
"sync" "sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/errors" "github.com/hibiken/asynq/internal/errors"
pb "github.com/hibiken/asynq/internal/proto" pb "github.com/hibiken/asynq/internal/proto"
"github.com/hibiken/asynq/internal/timeutil"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
) )
// Version of asynq library and CLI. // Version of asynq library and CLI.
const Version = "0.18.1" const Version = "0.23.0"
// DefaultQueueName is the queue name used if none are specified by user. // DefaultQueueName is the queue name used if none are specified by user.
const DefaultQueueName = "default" const DefaultQueueName = "default"
@@ -48,6 +49,8 @@ const (
TaskStateScheduled TaskStateScheduled
TaskStateRetry TaskStateRetry
TaskStateArchived TaskStateArchived
TaskStateCompleted
TaskStateAggregating // describes a state where a task is waiting in a group to be aggregated
) )
func (s TaskState) String() string { func (s TaskState) String() string {
@@ -62,6 +65,10 @@ func (s TaskState) String() string {
return "retry" return "retry"
case TaskStateArchived: case TaskStateArchived:
return "archived" return "archived"
case TaskStateCompleted:
return "completed"
case TaskStateAggregating:
return "aggregating"
} }
panic(fmt.Sprintf("internal error: unknown task state %d", s)) panic(fmt.Sprintf("internal error: unknown task state %d", s))
} }
@@ -78,6 +85,10 @@ func TaskStateFromString(s string) (TaskState, error) {
return TaskStateRetry, nil return TaskStateRetry, nil
case "archived": case "archived":
return TaskStateArchived, nil return TaskStateArchived, nil
case "completed":
return TaskStateCompleted, nil
case "aggregating":
return TaskStateAggregating, nil
} }
return 0, errors.E(errors.FailedPrecondition, fmt.Sprintf("%q is not a supported task state", s)) return 0, errors.E(errors.FailedPrecondition, fmt.Sprintf("%q is not a supported task state", s))
} }
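The two states added above round-trip through String and TaskStateFromString. A quick sketch (base is an internal package, so this is illustrative rather than importable from outside the asynq module):

package main

import (
	"fmt"

	"github.com/hibiken/asynq/internal/base"
)

func main() {
	for _, s := range []string{"completed", "aggregating"} {
		state, err := base.TaskStateFromString(s)
		if err != nil {
			panic(err)
		}
		// TaskState implements fmt.Stringer, so %v prints the state name back.
		fmt.Printf("%s -> %v\n", s, state)
	}
}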
@@ -85,7 +96,7 @@ func TaskStateFromString(s string) (TaskState, error) {
// ValidateQueueName validates a given qname to be used as a queue name. // ValidateQueueName validates a given qname to be used as a queue name.
// Returns nil if valid, otherwise returns non-nil error. // Returns nil if valid, otherwise returns non-nil error.
func ValidateQueueName(qname string) error { func ValidateQueueName(qname string) error {
if len(qname) == 0 { if len(strings.TrimSpace(qname)) == 0 {
return fmt.Errorf("queue name must contain one or more characters") return fmt.Errorf("queue name must contain one or more characters")
} }
return nil return nil
@@ -131,9 +142,13 @@ func ArchivedKey(qname string) string {
return fmt.Sprintf("%sarchived", QueueKeyPrefix(qname)) return fmt.Sprintf("%sarchived", QueueKeyPrefix(qname))
} }
// DeadlinesKey returns a redis key for the deadlines. // LeaseKey returns a redis key for the lease.
func DeadlinesKey(qname string) string { func LeaseKey(qname string) string {
return fmt.Sprintf("%sdeadlines", QueueKeyPrefix(qname)) return fmt.Sprintf("%slease", QueueKeyPrefix(qname))
}
// CompletedKey returns a redis key for the completed tasks.
func CompletedKey(qname string) string {
return fmt.Sprintf("%scompleted", QueueKeyPrefix(qname))
} }
// PausedKey returns a redis key to indicate that the given queue is paused. // PausedKey returns a redis key to indicate that the given queue is paused.
@@ -141,6 +156,16 @@ func PausedKey(qname string) string {
return fmt.Sprintf("%spaused", QueueKeyPrefix(qname)) return fmt.Sprintf("%spaused", QueueKeyPrefix(qname))
} }
// ProcessedTotalKey returns a redis key for total processed count for the given queue.
func ProcessedTotalKey(qname string) string {
return fmt.Sprintf("%sprocessed", QueueKeyPrefix(qname))
}
// FailedTotalKey returns a redis key for total failure count for the given queue.
func FailedTotalKey(qname string) string {
return fmt.Sprintf("%sfailed", QueueKeyPrefix(qname))
}
// ProcessedKey returns a redis key for processed count for the given day for the queue. // ProcessedKey returns a redis key for processed count for the given day for the queue.
func ProcessedKey(qname string, t time.Time) string { func ProcessedKey(qname string, t time.Time) string {
return fmt.Sprintf("%sprocessed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02")) return fmt.Sprintf("%sprocessed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
@@ -180,6 +205,32 @@ func UniqueKey(qname, tasktype string, payload []byte) string {
return fmt.Sprintf("%sunique:%s:%s", QueueKeyPrefix(qname), tasktype, hex.EncodeToString(checksum[:])) return fmt.Sprintf("%sunique:%s:%s", QueueKeyPrefix(qname), tasktype, hex.EncodeToString(checksum[:]))
} }
// GroupKeyPrefix returns a prefix for group key.
func GroupKeyPrefix(qname string) string {
return fmt.Sprintf("%sg:", QueueKeyPrefix(qname))
}
// GroupKey returns a redis key used to group tasks belonging to the same group.
func GroupKey(qname, gkey string) string {
return fmt.Sprintf("%s%s", GroupKeyPrefix(qname), gkey)
}
// AggregationSetKey returns a redis key used for an aggregation set.
func AggregationSetKey(qname, gname, setID string) string {
return fmt.Sprintf("%s:%s", GroupKey(qname, gname), setID)
}
// AllGroups returns a redis key used to store all group keys used in a given queue.
func AllGroups(qname string) string {
return fmt.Sprintf("%sgroups", QueueKeyPrefix(qname))
}
// AllAggregationSets returns a redis key used to store all aggregation sets (set of tasks staged to be aggregated)
// in a given queue.
func AllAggregationSets(qname string) string {
return fmt.Sprintf("%saggregation_sets", QueueKeyPrefix(qname))
}
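For reference, a sketch of the key layout these group helpers produce; the expected strings match the key tests later in this diff (again, base is internal, so treat this as illustrative):

package main

import (
	"fmt"

	"github.com/hibiken/asynq/internal/base"
)

func main() {
	// All group-related keys hang off the queue key prefix "asynq:{<qname>}:".
	fmt.Println(base.GroupKey("default", "mygroup"))                 // asynq:{default}:g:mygroup
	fmt.Println(base.AggregationSetKey("default", "mygroup", "123")) // asynq:{default}:g:mygroup:123
	fmt.Println(base.AllGroups("default"))                           // asynq:{default}:groups
	fmt.Println(base.AllAggregationSets("default"))                  // asynq:{default}:aggregation_sets
}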
// TaskMessage is the internal representation of a task with additional metadata fields. // TaskMessage is the internal representation of a task with additional metadata fields.
// Serialized data of this type gets written to redis. // Serialized data of this type gets written to redis.
type TaskMessage struct { type TaskMessage struct {
@@ -190,7 +241,7 @@ type TaskMessage struct {
Payload []byte Payload []byte
// ID is a unique identifier for each task. // ID is a unique identifier for each task.
ID uuid.UUID ID string
// Queue is a name this message should be enqueued to. // Queue is a name this message should be enqueued to.
Queue string Queue string
@@ -229,6 +280,20 @@ type TaskMessage struct {
// //
// Empty string indicates that no uniqueness lock was used. // Empty string indicates that no uniqueness lock was used.
UniqueKey string UniqueKey string
// GroupKey holds the group key used for task aggregation.
//
// Empty string indicates no aggregation is used for this task.
GroupKey string
// Retention specifies the number of seconds the task should be retained after completion.
Retention int64
// CompletedAt is the time the task was processed successfully in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
//
// Use zero to indicate no value.
CompletedAt int64
} }
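The new Retention and CompletedAt fields together determine how long a completed task is kept. A hedged sketch of the arithmetic (the helper name is ours, not part of asynq); it matches the CompletedAt + Retention scores the tests seed into the completed set:

package main

import (
	"fmt"
	"time"

	"github.com/hibiken/asynq/internal/base"
)

// completedTaskExpiry is an illustrative helper: a completed task is retained
// until CompletedAt plus Retention, both tracked in Unix seconds.
func completedTaskExpiry(msg *base.TaskMessage) time.Time {
	return time.Unix(msg.CompletedAt+msg.Retention, 0)
}

func main() {
	msg := &base.TaskMessage{
		Retention:   int64((30 * time.Minute).Seconds()),
		CompletedAt: time.Now().Add(-10 * time.Minute).Unix(),
	}
	fmt.Println("expires at:", completedTaskExpiry(msg)) // roughly 20 minutes from now
}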
// EncodeMessage marshals the given task message and returns an encoded bytes. // EncodeMessage marshals the given task message and returns an encoded bytes.
@@ -239,7 +304,7 @@ func EncodeMessage(msg *TaskMessage) ([]byte, error) {
return proto.Marshal(&pb.TaskMessage{ return proto.Marshal(&pb.TaskMessage{
Type: msg.Type, Type: msg.Type,
Payload: msg.Payload, Payload: msg.Payload,
Id: msg.ID.String(), Id: msg.ID,
Queue: msg.Queue, Queue: msg.Queue,
Retry: int32(msg.Retry), Retry: int32(msg.Retry),
Retried: int32(msg.Retried), Retried: int32(msg.Retried),
@@ -248,6 +313,9 @@ func EncodeMessage(msg *TaskMessage) ([]byte, error) {
Timeout: msg.Timeout, Timeout: msg.Timeout,
Deadline: msg.Deadline, Deadline: msg.Deadline,
UniqueKey: msg.UniqueKey, UniqueKey: msg.UniqueKey,
GroupKey: msg.GroupKey,
Retention: msg.Retention,
CompletedAt: msg.CompletedAt,
}) })
} }
@@ -260,7 +328,7 @@ func DecodeMessage(data []byte) (*TaskMessage, error) {
return &TaskMessage{ return &TaskMessage{
Type: pbmsg.GetType(), Type: pbmsg.GetType(),
Payload: pbmsg.GetPayload(), Payload: pbmsg.GetPayload(),
ID: uuid.MustParse(pbmsg.GetId()), ID: pbmsg.GetId(),
Queue: pbmsg.GetQueue(), Queue: pbmsg.GetQueue(),
Retry: int(pbmsg.GetRetry()), Retry: int(pbmsg.GetRetry()),
Retried: int(pbmsg.GetRetried()), Retried: int(pbmsg.GetRetried()),
@@ -269,6 +337,9 @@ func DecodeMessage(data []byte) (*TaskMessage, error) {
Timeout: pbmsg.GetTimeout(), Timeout: pbmsg.GetTimeout(),
Deadline: pbmsg.GetDeadline(), Deadline: pbmsg.GetDeadline(),
UniqueKey: pbmsg.GetUniqueKey(), UniqueKey: pbmsg.GetUniqueKey(),
GroupKey: pbmsg.GetGroupKey(),
Retention: pbmsg.GetRetention(),
CompletedAt: pbmsg.GetCompletedAt(),
}, nil }, nil
} }
@@ -277,6 +348,7 @@ type TaskInfo struct {
Message *TaskMessage Message *TaskMessage
State TaskState State TaskState
NextProcessAt time.Time NextProcessAt time.Time
Result []byte
} }
// Z represents sorted set member. // Z represents sorted set member.
@@ -285,68 +357,6 @@ type Z struct {
Score int64 Score int64
} }
// ServerState represents state of a server.
// ServerState methods are concurrency safe.
type ServerState struct {
mu sync.Mutex
val ServerStateValue
}
// NewServerState returns a new state instance.
// Initial state is set to StateNew.
func NewServerState() *ServerState {
return &ServerState{val: StateNew}
}
type ServerStateValue int
const (
// StateNew represents a new server. Server begins in
// this state and then transitions to StateActive when
// Start or Run is called.
StateNew ServerStateValue = iota
// StateActive indicates the server is up and active.
StateActive
// StateStopped indicates the server is up but no longer processing new tasks.
StateStopped
// StateClosed indicates the server has been shutdown.
StateClosed
)
var serverStates = []string{
"new",
"active",
"stopped",
"closed",
}
func (s *ServerState) String() string {
s.mu.Lock()
defer s.mu.Unlock()
if StateNew <= s.val && s.val <= StateClosed {
return serverStates[s.val]
}
return "unknown status"
}
// Get returns the status value.
func (s *ServerState) Get() ServerStateValue {
s.mu.Lock()
v := s.val
s.mu.Unlock()
return v
}
// Set sets the status value.
func (s *ServerState) Set(v ServerStateValue) {
s.mu.Lock()
s.val = v
s.mu.Unlock()
}
// ServerInfo holds information about a running server. // ServerInfo holds information about a running server.
type ServerInfo struct { type ServerInfo struct {
Host string Host string
@@ -632,25 +642,114 @@ func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
return fn, ok return fn, ok
} }
// Lease is a time-bound lease for a worker to process a task.
// It provides a communication channel between the lessor and the lessee about lease expiration.
type Lease struct {
once sync.Once
ch chan struct{}
Clock timeutil.Clock
mu sync.Mutex
expireAt time.Time // guarded by mu
}
func NewLease(expirationTime time.Time) *Lease {
return &Lease{
ch: make(chan struct{}),
expireAt: expirationTime,
Clock: timeutil.NewRealClock(),
}
}
// Reset changes the lease to expire at the given time.
// It returns true if the lease is still valid and the reset succeeded, or false if the lease had already expired.
func (l *Lease) Reset(expirationTime time.Time) bool {
if !l.IsValid() {
return false
}
l.mu.Lock()
defer l.mu.Unlock()
l.expireAt = expirationTime
return true
}
// NotifyExpiration sends a notification to the lessee about an expired lease.
// It returns true if the notification was sent, or false if the lease is still valid and no notification was sent.
func (l *Lease) NotifyExpiration() bool {
if l.IsValid() {
return false
}
l.once.Do(l.closeCh)
return true
}
func (l *Lease) closeCh() {
close(l.ch)
}
// Done returns a channel the lessee can receive from to get notified when the lessor declares the lease expired.
func (l *Lease) Done() <-chan struct{} {
return l.ch
}
// Deadline returns the expiration time of the lease.
func (l *Lease) Deadline() time.Time {
l.mu.Lock()
defer l.mu.Unlock()
return l.expireAt
}
// IsValid returns true if the lease's expiration time is in the future or equal to the current time,
// and false otherwise.
func (l *Lease) IsValid() bool {
now := l.Clock.Now()
l.mu.Lock()
defer l.mu.Unlock()
return l.expireAt.After(now) || l.expireAt.Equal(now)
}
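Putting the Lease API together, a minimal end-to-end sketch of the expiration path using only the methods defined above:

package main

import (
	"fmt"
	"time"

	"github.com/hibiken/asynq/internal/base"
)

func main() {
	// Create a lease that expired one second ago to exercise the expiration path.
	l := base.NewLease(time.Now().Add(-1 * time.Second))

	if !l.Reset(time.Now().Add(30 * time.Second)) {
		// Reset refuses to extend an already-expired lease, so notify the
		// lessee instead; Done() is closed exactly once via sync.Once.
		l.NotifyExpiration()
	}

	select {
	case <-l.Done():
		fmt.Println("lessee observed expiration; deadline was", l.Deadline())
	default:
		fmt.Println("lease still valid until", l.Deadline())
	}
}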
// Broker is a message broker that supports operations to manage task queues. // Broker is a message broker that supports operations to manage task queues.
// //
// See rdb.RDB as a reference implementation. // See rdb.RDB as a reference implementation.
type Broker interface { type Broker interface {
Ping() error Ping() error
Enqueue(msg *TaskMessage) error Close() error
EnqueueUnique(msg *TaskMessage, ttl time.Duration) error Enqueue(ctx context.Context, msg *TaskMessage) error
EnqueueUnique(ctx context.Context, msg *TaskMessage, ttl time.Duration) error
Dequeue(qnames ...string) (*TaskMessage, time.Time, error) Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
Done(msg *TaskMessage) error Done(ctx context.Context, msg *TaskMessage) error
Requeue(msg *TaskMessage) error MarkAsComplete(ctx context.Context, msg *TaskMessage) error
Schedule(msg *TaskMessage, processAt time.Time) error Requeue(ctx context.Context, msg *TaskMessage) error
ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error Schedule(ctx context.Context, msg *TaskMessage, processAt time.Time) error
Retry(msg *TaskMessage, processAt time.Time, errMsg string) error ScheduleUnique(ctx context.Context, msg *TaskMessage, processAt time.Time, ttl time.Duration) error
Archive(msg *TaskMessage, errMsg string) error Retry(ctx context.Context, msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
Archive(ctx context.Context, msg *TaskMessage, errMsg string) error
ForwardIfReady(qnames ...string) error ForwardIfReady(qnames ...string) error
ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
// Group aggregation related methods
AddToGroup(ctx context.Context, msg *TaskMessage, gname string) error
AddToGroupUnique(ctx context.Context, msg *TaskMessage, groupKey string, ttl time.Duration) error
ListGroups(qname string) ([]string, error)
AggregationCheck(qname, gname string, t time.Time, gracePeriod, maxDelay time.Duration, maxSize int) (aggregationSetID string, err error)
ReadAggregationSet(qname, gname, aggregationSetID string) ([]*TaskMessage, time.Time, error)
DeleteAggregationSet(ctx context.Context, qname, gname, aggregationSetID string) error
ReclaimStaleAggregationSets(qname string) error
// Task retention related method
DeleteExpiredCompletedTasks(qname string) error
// Lease related methods
ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*TaskMessage, error)
ExtendLease(qname string, ids ...string) (time.Time, error)
// State snapshot related methods
WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
ClearServerState(host string, pid int, serverID string) error ClearServerState(host string, pid int, serverID string) error
// Cancelation related methods
CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
PublishCancelation(id string) error PublishCancelation(id string) error
Close() error
WriteResult(qname, id string, data []byte) (n int, err error)
} }
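As one example of how the new lease methods might compose, a sketch of a heartbeat-style extension loop against the Broker interface. The loop shape and interval are our assumptions, not asynq's actual heartbeat; only the ExtendLease signature comes from the interface above:

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq/internal/base"
)

// extendLoop periodically extends the lease for the given task IDs until
// stop is closed, or until an extension attempt fails.
func extendLoop(b base.Broker, qname string, ids []string, stop <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Second) // interval is an assumption
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			expireAt, err := b.ExtendLease(qname, ids...)
			if err != nil {
				log.Printf("could not extend lease: %v", err)
				return
			}
			log.Printf("lease extended; now expires at %v", expireAt)
		}
	}
}

func main() {} // placeholder so the sketch compiles standalone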

View File

@@ -16,6 +16,7 @@ import (
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq/internal/timeutil"
) )
func TestTaskKey(t *testing.T) { func TestTaskKey(t *testing.T) {
@@ -71,19 +72,19 @@ func TestActiveKey(t *testing.T) {
} }
} }
func TestDeadlinesKey(t *testing.T) { func TestLeaseKey(t *testing.T) {
tests := []struct { tests := []struct {
qname string qname string
want string want string
}{ }{
{"default", "asynq:{default}:deadlines"}, {"default", "asynq:{default}:lease"},
{"custom", "asynq:{custom}:deadlines"}, {"custom", "asynq:{custom}:lease"},
} }
for _, tc := range tests { for _, tc := range tests {
got := DeadlinesKey(tc.qname) got := LeaseKey(tc.qname)
if got != tc.want { if got != tc.want {
t.Errorf("DeadlinesKey(%q) = %q, want %q", tc.qname, got, tc.want) t.Errorf("LeaseKey(%q) = %q, want %q", tc.qname, got, tc.want)
} }
} }
} }
@@ -139,6 +140,23 @@ func TestArchivedKey(t *testing.T) {
} }
} }
func TestCompletedKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:completed"},
{"custom", "asynq:{custom}:completed"},
}
for _, tc := range tests {
got := CompletedKey(tc.qname)
if got != tc.want {
t.Errorf("CompletedKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestPausedKey(t *testing.T) { func TestPausedKey(t *testing.T) {
tests := []struct { tests := []struct {
qname string qname string
@@ -156,6 +174,40 @@ func TestPausedKey(t *testing.T) {
} }
} }
func TestProcessedTotalKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:processed"},
{"custom", "asynq:{custom}:processed"},
}
for _, tc := range tests {
got := ProcessedTotalKey(tc.qname)
if got != tc.want {
t.Errorf("ProcessedTotalKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestFailedTotalKey(t *testing.T) {
tests := []struct {
qname string
want string
}{
{"default", "asynq:{default}:failed"},
{"custom", "asynq:{custom}:failed"},
}
for _, tc := range tests {
got := FailedTotalKey(tc.qname)
if got != tc.want {
t.Errorf("FailedTotalKey(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestProcessedKey(t *testing.T) { func TestProcessedKey(t *testing.T) {
tests := []struct { tests := []struct {
qname string qname string
@@ -343,32 +395,137 @@ func TestUniqueKey(t *testing.T) {
} }
} }
func TestGroupKey(t *testing.T) {
tests := []struct {
qname string
gkey string
want string
}{
{
qname: "default",
gkey: "mygroup",
want: "asynq:{default}:g:mygroup",
},
{
qname: "custom",
gkey: "foo",
want: "asynq:{custom}:g:foo",
},
}
for _, tc := range tests {
got := GroupKey(tc.qname, tc.gkey)
if got != tc.want {
t.Errorf("GroupKey(%q, %q) = %q, want %q", tc.qname, tc.gkey, got, tc.want)
}
}
}
func TestAggregationSetKey(t *testing.T) {
tests := []struct {
qname string
gname string
setID string
want string
}{
{
qname: "default",
gname: "mygroup",
setID: "12345",
want: "asynq:{default}:g:mygroup:12345",
},
{
qname: "custom",
gname: "foo",
setID: "98765",
want: "asynq:{custom}:g:foo:98765",
},
}
for _, tc := range tests {
got := AggregationSetKey(tc.qname, tc.gname, tc.setID)
if got != tc.want {
t.Errorf("AggregationSetKey(%q, %q, %q) = %q, want %q", tc.qname, tc.gname, tc.setID, got, tc.want)
}
}
}
func TestAllGroups(t *testing.T) {
tests := []struct {
qname string
want string
}{
{
qname: "default",
want: "asynq:{default}:groups",
},
{
qname: "custom",
want: "asynq:{custom}:groups",
},
}
for _, tc := range tests {
got := AllGroups(tc.qname)
if got != tc.want {
t.Errorf("AllGroups(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestAllAggregationSets(t *testing.T) {
tests := []struct {
qname string
want string
}{
{
qname: "default",
want: "asynq:{default}:aggregation_sets",
},
{
qname: "custom",
want: "asynq:{custom}:aggregation_sets",
},
}
for _, tc := range tests {
got := AllAggregationSets(tc.qname)
if got != tc.want {
t.Errorf("AllAggregationSets(%q) = %q, want %q", tc.qname, got, tc.want)
}
}
}
func TestMessageEncoding(t *testing.T) { func TestMessageEncoding(t *testing.T) {
id := uuid.New() id := uuid.NewString()
tests := []struct { tests := []struct {
in *TaskMessage in *TaskMessage
out *TaskMessage out *TaskMessage
}{ }{
{ {
in: &TaskMessage{ in: &TaskMessage{
Type: "task1", Type: "task1",
Payload: toBytes(map[string]interface{}{"a": 1, "b": "hello!", "c": true}), Payload: toBytes(map[string]interface{}{"a": 1, "b": "hello!", "c": true}),
ID: id, ID: id,
Queue: "default", Queue: "default",
Retry: 10, GroupKey: "mygroup",
Retried: 0, Retry: 10,
Timeout: 1800, Retried: 0,
Deadline: 1692311100, Timeout: 1800,
Deadline: 1692311100,
Retention: 3600,
}, },
out: &TaskMessage{ out: &TaskMessage{
Type: "task1", Type: "task1",
Payload: toBytes(map[string]interface{}{"a": json.Number("1"), "b": "hello!", "c": true}), Payload: toBytes(map[string]interface{}{"a": json.Number("1"), "b": "hello!", "c": true}),
ID: id, ID: id,
Queue: "default", Queue: "default",
Retry: 10, GroupKey: "mygroup",
Retried: 0, Retry: 10,
Timeout: 1800, Retried: 0,
Deadline: 1692311100, Timeout: 1800,
Deadline: 1692311100,
Retention: 3600,
}, },
}, },
} }
@@ -530,30 +687,6 @@ func TestSchedulerEnqueueEventEncoding(t *testing.T) {
} }
} }
// Test for status being accessed by multiple goroutines.
// Run with -race flag to check for data race.
func TestStatusConcurrentAccess(t *testing.T) {
status := NewServerState()
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
status.Get()
_ = status.String()
}()
wg.Add(1)
go func() {
defer wg.Done()
status.Set(StateClosed)
_ = status.String()
}()
wg.Wait()
}
// Test for cancelations being accessed by multiple goroutines. // Test for cancelations being accessed by multiple goroutines.
// Run with -race flag to check for data race. // Run with -race flag to check for data race.
func TestCancelationsConcurrentAccess(t *testing.T) { func TestCancelationsConcurrentAccess(t *testing.T) {
@@ -598,3 +731,75 @@ func TestCancelationsConcurrentAccess(t *testing.T) {
t.Errorf("(*Cancelations).Get(%q) = _, true, want <nil>, false", key2) t.Errorf("(*Cancelations).Get(%q) = _, true, want <nil>, false", key2)
} }
} }
func TestLeaseReset(t *testing.T) {
now := time.Now()
clock := timeutil.NewSimulatedClock(now)
l := NewLease(now.Add(30 * time.Second))
l.Clock = clock
// Check initial state
if !l.IsValid() {
t.Errorf("lease should be valid when expiration is set to a future time")
}
if want := now.Add(30 * time.Second); l.Deadline() != want {
t.Errorf("Lease.Deadline() = %v, want %v", l.Deadline(), want)
}
// Test Reset
if !l.Reset(now.Add(45 * time.Second)) {
t.Fatalf("Lease.Reset returned false when extending")
}
if want := now.Add(45 * time.Second); l.Deadline() != want {
t.Errorf("After Reset: Lease.Deadline() = %v, want %v", l.Deadline(), want)
}
clock.AdvanceTime(1 * time.Minute) // simulate lease expiration
if l.IsValid() {
t.Errorf("lease should be invalid after expiration")
}
// Reset should return false if lease is expired.
if l.Reset(time.Now().Add(20 * time.Second)) {
t.Errorf("Lease.Reset should return false after expiration")
}
}
func TestLeaseNotifyExpiration(t *testing.T) {
now := time.Now()
clock := timeutil.NewSimulatedClock(now)
l := NewLease(now.Add(30 * time.Second))
l.Clock = clock
select {
case <-l.Done():
t.Fatalf("Lease.Done() did not block")
default:
}
if l.NotifyExpiration() {
t.Fatalf("Lease.NotifyExpiration() should return false when lease is still valid")
}
clock.AdvanceTime(1 * time.Minute) // simulate lease expiration
if l.IsValid() {
t.Errorf("Lease should be invalid after expiration")
}
if !l.NotifyExpiration() {
t.Errorf("Lease.NotifyExpiration() return return true after expiration")
}
if !l.NotifyExpiration() {
t.Errorf("It should be leagal to call Lease.NotifyExpiration multiple times")
}
select {
case <-l.Done():
// expected
default:
t.Errorf("Lease.Done() blocked after call to Lease.NotifyExpiration()")
}
}

View File

@@ -0,0 +1,87 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package context
import (
"context"
"time"
"github.com/hibiken/asynq/internal/base"
)
// A taskMetadata holds task-scoped data to put in a context.
type taskMetadata struct {
id string
maxRetry int
retryCount int
qname string
}
// ctxKey type is unexported to prevent collisions with context keys defined in
// other packages.
type ctxKey int
// metadataCtxKey is the context key for the task metadata.
// Its value of zero is arbitrary.
const metadataCtxKey ctxKey = 0
// New returns a context and cancel function for a given task message.
func New(base context.Context, msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
metadata := taskMetadata{
id: msg.ID,
maxRetry: msg.Retry,
retryCount: msg.Retried,
qname: msg.Queue,
}
ctx := context.WithValue(base, metadataCtxKey, metadata)
return context.WithDeadline(ctx, deadline)
}
// GetTaskID extracts a task ID from a context, if any.
//
// ID of a task is guaranteed to be unique.
// ID of a task doesn't change if the task is being retried.
func GetTaskID(ctx context.Context) (id string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return "", false
}
return metadata.id, true
}
// GetRetryCount extracts retry count from a context, if any.
//
// Return value n indicates the number of times the associated task has been
// retried so far.
func GetRetryCount(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return 0, false
}
return metadata.retryCount, true
}
// GetMaxRetry extracts maximum retry from a context, if any.
//
// Return value n indicates the maximum number of times the associated task
// can be retried if ProcessTask returns a non-nil error.
func GetMaxRetry(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return 0, false
}
return metadata.maxRetry, true
}
// GetQueueName extracts queue name from a context, if any.
//
// Return value qname indicates which queue the task was pulled from.
func GetQueueName(ctx context.Context) (qname string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return "", false
}
return metadata.qname, true
}
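A minimal sketch of how these accessors pair with New; the message field values are illustrative, and the internal import paths are shown for clarity only:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hibiken/asynq/internal/base"
	asynqcontext "github.com/hibiken/asynq/internal/context"
)

func main() {
	msg := &base.TaskMessage{
		Type:  "email:welcome",
		ID:    "26a3e122-0e91-4b1a-88f6-d2e8efca2d2f",
		Queue: "default",
		Retry: 25,
	}
	ctx, cancel := asynqcontext.New(context.Background(), msg, time.Now().Add(30*time.Minute))
	defer cancel()

	if id, ok := asynqcontext.GetTaskID(ctx); ok {
		fmt.Println("task id:", id) // same as msg.ID; stable across retries
	}
	if n, ok := asynqcontext.GetMaxRetry(ctx); ok {
		fmt.Println("max retry:", n)
	}
	if qname, ok := asynqcontext.GetQueueName(ctx); ok {
		fmt.Println("queue:", qname)
	}
}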

View File

@@ -2,10 +2,11 @@
// Use of this source code is governed by a MIT license // Use of this source code is governed by a MIT license
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
package asynq package context
import ( import (
"context" "context"
"fmt"
"testing" "testing"
"time" "time"
@@ -24,12 +25,11 @@ func TestCreateContextWithFutureDeadline(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
msg := &base.TaskMessage{ msg := &base.TaskMessage{
Type: "something", Type: "something",
ID: uuid.New(), ID: uuid.NewString(),
Payload: nil, Payload: nil,
} }
ctx, cancel := createContext(msg, tc.deadline) ctx, cancel := New(context.Background(), msg, tc.deadline)
select { select {
case x := <-ctx.Done(): case x := <-ctx.Done():
t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x) t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
@@ -54,6 +54,53 @@ func TestCreateContextWithFutureDeadline(t *testing.T) {
} }
} }
func TestCreateContextWithBaseContext(t *testing.T) {
type ctxKey string
type ctxValue string
var key ctxKey = "key"
var value ctxValue = "value"
tests := []struct {
baseCtx context.Context
validate func(ctx context.Context, t *testing.T) error
}{
{
baseCtx: context.WithValue(context.Background(), key, value),
validate: func(ctx context.Context, t *testing.T) error {
got, ok := ctx.Value(key).(ctxValue)
if !ok {
return fmt.Errorf("ctx.Value().(ctxValue) returned false, expected to be true")
}
if want := value; got != want {
return fmt.Errorf("ctx.Value().(ctxValue) returned unknown value (%v), expected to be %s", got, value)
}
return nil
},
},
}
for _, tc := range tests {
msg := &base.TaskMessage{
Type: "something",
ID: uuid.NewString(),
Payload: nil,
}
ctx, cancel := New(tc.baseCtx, msg, time.Now().Add(30*time.Minute))
defer cancel()
select {
case x := <-ctx.Done():
t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
default:
}
if err := tc.validate(ctx, t); err != nil {
t.Errorf("%v", err)
}
}
}
func TestCreateContextWithPastDeadline(t *testing.T) { func TestCreateContextWithPastDeadline(t *testing.T) {
tests := []struct { tests := []struct {
deadline time.Time deadline time.Time
@@ -64,11 +111,11 @@ func TestCreateContextWithPastDeadline(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
msg := &base.TaskMessage{ msg := &base.TaskMessage{
Type: "something", Type: "something",
ID: uuid.New(), ID: uuid.NewString(),
Payload: nil, Payload: nil,
} }
ctx, cancel := createContext(msg, tc.deadline) ctx, cancel := New(context.Background(), msg, tc.deadline)
defer cancel() defer cancel()
select { select {
@@ -92,21 +139,21 @@ func TestGetTaskMetadataFromContext(t *testing.T) {
desc string desc string
msg *base.TaskMessage msg *base.TaskMessage
}{ }{
{"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}}, {"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}},
{"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}}, {"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}},
{"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.New(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}}, {"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}},
} }
for _, tc := range tests { for _, tc := range tests {
ctx, cancel := createContext(tc.msg, time.Now().Add(30*time.Minute)) ctx, cancel := New(context.Background(), tc.msg, time.Now().Add(30*time.Minute))
defer cancel() defer cancel()
id, ok := GetTaskID(ctx) id, ok := GetTaskID(ctx)
if !ok { if !ok {
t.Errorf("%s: GetTaskID(ctx) returned ok == false", tc.desc) t.Errorf("%s: GetTaskID(ctx) returned ok == false", tc.desc)
} }
if ok && id != tc.msg.ID.String() { if ok && id != tc.msg.ID {
t.Errorf("%s: GetTaskID(ctx) returned id == %q, want %q", tc.desc, id, tc.msg.ID.String()) t.Errorf("%s: GetTaskID(ctx) returned id == %q, want %q", tc.desc, id, tc.msg.ID)
} }
retried, ok := GetRetryCount(ctx) retried, ok := GetRetryCount(ctx)

View File

@@ -170,6 +170,9 @@ var (
// ErrDuplicateTask indicates that another task with the same unique key holds the uniqueness lock. // ErrDuplicateTask indicates that another task with the same unique key holds the uniqueness lock.
ErrDuplicateTask = errors.New("task already exists") ErrDuplicateTask = errors.New("task already exists")
// ErrTaskIdConflict indicates that another task with the same task ID already exists
ErrTaskIdConflict = errors.New("task id conflicts with another task")
) )
// TaskNotFoundError indicates that a task with the given ID does not exist // TaskNotFoundError indicates that a task with the given ID does not exist
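A hedged sketch of how an enqueue caller might distinguish this new sentinel, assuming the public package re-exports it as asynq.ErrTaskIDConflict alongside asynq.ErrDuplicateTask (client, task, and the custom ID below are illustrative):

info, err := client.Enqueue(task, asynq.TaskID("order-1234"))
switch {
case errors.Is(err, asynq.ErrTaskIDConflict):
	log.Println("a task with this ID already exists; choose another ID")
case errors.Is(err, asynq.ErrDuplicateTask):
	log.Println("the uniqueness lock is held by another task")
case err != nil:
	log.Fatal(err)
default:
	log.Printf("enqueued task %s", info.ID)
}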

View File

@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // versions:
// protoc-gen-go v1.25.0 // protoc-gen-go v1.25.0
// protoc v3.14.0 // protoc v3.17.3
// source: asynq.proto // source: asynq.proto
package proto package proto
@@ -32,6 +32,7 @@ const _ = proto.ProtoPackageIsVersion4
// TaskMessage is the internal representation of a task with additional // TaskMessage is the internal representation of a task with additional
// metadata fields. // metadata fields.
// Next ID: 15
type TaskMessage struct { type TaskMessage struct {
state protoimpl.MessageState state protoimpl.MessageState
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
@@ -65,6 +66,17 @@ type TaskMessage struct {
// UniqueKey holds the redis key used for uniqueness lock for this task. // UniqueKey holds the redis key used for uniqueness lock for this task.
// Empty string indicates that no uniqueness lock was used. // Empty string indicates that no uniqueness lock was used.
UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"` UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"`
// GroupKey is the name of the group used for task aggregation.
// This field is optional; an empty value means no aggregation for the task.
GroupKey string `protobuf:"bytes,14,opt,name=group_key,json=groupKey,proto3" json:"group_key,omitempty"`
// Retention period specified in a number of seconds.
// The task will be stored in redis as a completed task until the TTL
// expires.
Retention int64 `protobuf:"varint,12,opt,name=retention,proto3" json:"retention,omitempty"`
// Time when the task completed successfully, in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// This field is populated if result_ttl > 0 upon completion.
CompletedAt int64 `protobuf:"varint,13,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"`
} }
func (x *TaskMessage) Reset() { func (x *TaskMessage) Reset() {
@@ -176,6 +188,27 @@ func (x *TaskMessage) GetUniqueKey() string {
return "" return ""
} }
func (x *TaskMessage) GetGroupKey() string {
if x != nil {
return x.GroupKey
}
return ""
}
func (x *TaskMessage) GetRetention() int64 {
if x != nil {
return x.Retention
}
return 0
}
func (x *TaskMessage) GetCompletedAt() int64 {
if x != nil {
return x.CompletedAt
}
return 0
}
// ServerInfo holds information about a running server. // ServerInfo holds information about a running server.
type ServerInfo struct { type ServerInfo struct {
state protoimpl.MessageState state protoimpl.MessageState
@@ -592,7 +625,7 @@ var file_asynq_proto_rawDesc = []byte{
// (Generated descriptor bytes elided: file_asynq_proto_rawDesc was
// regenerated to encode the new TaskMessage fields group_key, retention,
// and completed_at; the interleaved old/new byte columns of the
// side-by-side diff carry no further recoverable information.)
} }
var ( var (

View File

@@ -11,6 +11,7 @@ option go_package = "github.com/hibiken/asynq/internal/proto";
// TaskMessage is the internal representation of a task with additional // TaskMessage is the internal representation of a task with additional
// metadata fields. // metadata fields.
// Next ID: 15
message TaskMessage { message TaskMessage {
// Type indicates the kind of the task to be performed. // Type indicates the kind of the task to be performed.
string type = 1; string type = 1;
@@ -51,6 +52,19 @@ message TaskMessage {
// Empty string indicates that no uniqueness lock was used. // Empty string indicates that no uniqueness lock was used.
string unique_key = 10; string unique_key = 10;
// GroupKey is the name of the group used for task aggregation.
// This field is optional; an empty value means no aggregation for the task.
string group_key = 14;
// Retention period specified in a number of seconds.
// The task will be stored in redis as a completed task until the TTL
// expires.
int64 retention = 12;
// Time when the task completed successfully, in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// This field is populated if result_ttl > 0 upon completion.
int64 completed_at = 13;
}; };
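To make the retention arithmetic concrete: the deletion time of a completed task follows directly from the two new fields. A small sketch in Go (the helper name is hypothetical):

// expiresAt returns the Unix time at which a completed task's data may be
// deleted: completed_at is the completion time in Unix seconds and
// retention is a TTL in seconds, per the field comments above.
func expiresAt(completedAt, retention int64) int64 {
	return completedAt + retention
}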
// ServerInfo holds information about a running server. // ServerInfo holds information about a running server.

View File

@@ -5,25 +5,27 @@
package rdb package rdb
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
"time" "time"
"github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/testutil"
) )
func BenchmarkEnqueue(b *testing.B) { func BenchmarkEnqueue(b *testing.B) {
r := setup(b) r := setup(b)
msg := asynqtest.NewTaskMessage("task1", nil) ctx := context.Background()
msg := testutil.NewTaskMessage("task1", nil)
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
b.StartTimer() b.StartTimer()
if err := r.Enqueue(msg); err != nil { if err := r.Enqueue(ctx, msg); err != nil {
b.Fatalf("Enqueue failed: %v", err) b.Fatalf("Enqueue failed: %v", err)
} }
} }
@@ -31,6 +33,7 @@ func BenchmarkEnqueue(b *testing.B) {
func BenchmarkEnqueueUnique(b *testing.B) { func BenchmarkEnqueueUnique(b *testing.B) {
r := setup(b) r := setup(b)
ctx := context.Background()
msg := &base.TaskMessage{ msg := &base.TaskMessage{
Type: "task1", Type: "task1",
Payload: nil, Payload: nil,
@@ -42,10 +45,10 @@ func BenchmarkEnqueueUnique(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
b.StartTimer() b.StartTimer()
if err := r.EnqueueUnique(msg, uniqueTTL); err != nil { if err := r.EnqueueUnique(ctx, msg, uniqueTTL); err != nil {
b.Fatalf("EnqueueUnique failed: %v", err) b.Fatalf("EnqueueUnique failed: %v", err)
} }
} }
@@ -53,16 +56,17 @@ func BenchmarkEnqueueUnique(b *testing.B) {
func BenchmarkSchedule(b *testing.B) { func BenchmarkSchedule(b *testing.B) {
r := setup(b) r := setup(b)
msg := asynqtest.NewTaskMessage("task1", nil) ctx := context.Background()
msg := testutil.NewTaskMessage("task1", nil)
processAt := time.Now().Add(3 * time.Minute) processAt := time.Now().Add(3 * time.Minute)
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
b.StartTimer() b.StartTimer()
if err := r.Schedule(msg, processAt); err != nil { if err := r.Schedule(ctx, msg, processAt); err != nil {
b.Fatalf("Schedule failed: %v", err) b.Fatalf("Schedule failed: %v", err)
} }
} }
@@ -70,6 +74,7 @@ func BenchmarkSchedule(b *testing.B) {
func BenchmarkScheduleUnique(b *testing.B) { func BenchmarkScheduleUnique(b *testing.B) {
r := setup(b) r := setup(b)
ctx := context.Background()
msg := &base.TaskMessage{ msg := &base.TaskMessage{
Type: "task1", Type: "task1",
Payload: nil, Payload: nil,
@@ -82,10 +87,10 @@ func BenchmarkScheduleUnique(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
b.StartTimer() b.StartTimer()
if err := r.ScheduleUnique(msg, processAt, uniqueTTL); err != nil { if err := r.ScheduleUnique(ctx, msg, processAt, uniqueTTL); err != nil {
b.Fatalf("EnqueueUnique failed: %v", err) b.Fatalf("EnqueueUnique failed: %v", err)
} }
} }
@@ -93,15 +98,16 @@ func BenchmarkScheduleUnique(b *testing.B) {
func BenchmarkDequeueSingleQueue(b *testing.B) { func BenchmarkDequeueSingleQueue(b *testing.B) {
r := setup(b) r := setup(b)
ctx := context.Background()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := asynqtest.NewTaskMessageWithQueue( m := testutil.NewTaskMessageWithQueue(
fmt.Sprintf("task%d", i), nil, base.DefaultQueueName) fmt.Sprintf("task%d", i), nil, base.DefaultQueueName)
if err := r.Enqueue(m); err != nil { if err := r.Enqueue(ctx, m); err != nil {
b.Fatalf("Enqueue failed: %v", err) b.Fatalf("Enqueue failed: %v", err)
} }
} }
@@ -116,16 +122,17 @@ func BenchmarkDequeueSingleQueue(b *testing.B) {
func BenchmarkDequeueMultipleQueues(b *testing.B) { func BenchmarkDequeueMultipleQueues(b *testing.B) {
qnames := []string{"critical", "default", "low"} qnames := []string{"critical", "default", "low"}
r := setup(b) r := setup(b)
ctx := context.Background()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
for _, qname := range qnames { for _, qname := range qnames {
m := asynqtest.NewTaskMessageWithQueue( m := testutil.NewTaskMessageWithQueue(
fmt.Sprintf("%s_task%d", qname, i), nil, qname) fmt.Sprintf("%s_task%d", qname, i), nil, qname)
if err := r.Enqueue(m); err != nil { if err := r.Enqueue(ctx, m); err != nil {
b.Fatalf("Enqueue failed: %v", err) b.Fatalf("Enqueue failed: %v", err)
} }
} }
@@ -140,25 +147,26 @@ func BenchmarkDequeueMultipleQueues(b *testing.B) {
func BenchmarkDone(b *testing.B) { func BenchmarkDone(b *testing.B) {
r := setup(b) r := setup(b)
m1 := asynqtest.NewTaskMessage("task1", nil) m1 := testutil.NewTaskMessage("task1", nil)
m2 := asynqtest.NewTaskMessage("task2", nil) m2 := testutil.NewTaskMessage("task2", nil)
m3 := asynqtest.NewTaskMessage("task3", nil) m3 := testutil.NewTaskMessage("task3", nil)
msgs := []*base.TaskMessage{m1, m2, m3} msgs := []*base.TaskMessage{m1, m2, m3}
zs := []base.Z{ zs := []base.Z{
{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()}, {Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()}, {Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()}, {Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
} }
ctx := context.Background()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName) testutil.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName) testutil.SeedLease(b, r.client, zs, base.DefaultQueueName)
b.StartTimer() b.StartTimer()
if err := r.Done(msgs[0]); err != nil { if err := r.Done(ctx, msgs[0]); err != nil {
b.Fatalf("Done failed: %v", err) b.Fatalf("Done failed: %v", err)
} }
} }
@@ -166,25 +174,26 @@ func BenchmarkDone(b *testing.B) {
func BenchmarkRetry(b *testing.B) { func BenchmarkRetry(b *testing.B) {
r := setup(b) r := setup(b)
m1 := asynqtest.NewTaskMessage("task1", nil) m1 := testutil.NewTaskMessage("task1", nil)
m2 := asynqtest.NewTaskMessage("task2", nil) m2 := testutil.NewTaskMessage("task2", nil)
m3 := asynqtest.NewTaskMessage("task3", nil) m3 := testutil.NewTaskMessage("task3", nil)
msgs := []*base.TaskMessage{m1, m2, m3} msgs := []*base.TaskMessage{m1, m2, m3}
zs := []base.Z{ zs := []base.Z{
{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()}, {Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()}, {Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()}, {Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
} }
ctx := context.Background()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName) testutil.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName) testutil.SeedLease(b, r.client, zs, base.DefaultQueueName)
b.StartTimer() b.StartTimer()
if err := r.Retry(msgs[0], time.Now().Add(1*time.Minute), "error"); err != nil { if err := r.Retry(ctx, msgs[0], time.Now().Add(1*time.Minute), "error", true /*isFailure*/); err != nil {
b.Fatalf("Retry failed: %v", err) b.Fatalf("Retry failed: %v", err)
} }
} }
@@ -192,25 +201,26 @@ func BenchmarkRetry(b *testing.B) {
func BenchmarkArchive(b *testing.B) { func BenchmarkArchive(b *testing.B) {
r := setup(b) r := setup(b)
m1 := asynqtest.NewTaskMessage("task1", nil) m1 := testutil.NewTaskMessage("task1", nil)
m2 := asynqtest.NewTaskMessage("task2", nil) m2 := testutil.NewTaskMessage("task2", nil)
m3 := asynqtest.NewTaskMessage("task3", nil) m3 := testutil.NewTaskMessage("task3", nil)
msgs := []*base.TaskMessage{m1, m2, m3} msgs := []*base.TaskMessage{m1, m2, m3}
zs := []base.Z{ zs := []base.Z{
{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()}, {Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()}, {Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()}, {Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
} }
ctx := context.Background()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName) testutil.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName) testutil.SeedLease(b, r.client, zs, base.DefaultQueueName)
b.StartTimer() b.StartTimer()
if err := r.Archive(msgs[0], "error"); err != nil { if err := r.Archive(ctx, msgs[0], "error"); err != nil {
b.Fatalf("Archive failed: %v", err) b.Fatalf("Archive failed: %v", err)
} }
} }
@@ -218,25 +228,26 @@ func BenchmarkArchive(b *testing.B) {
func BenchmarkRequeue(b *testing.B) { func BenchmarkRequeue(b *testing.B) {
r := setup(b) r := setup(b)
m1 := asynqtest.NewTaskMessage("task1", nil) m1 := testutil.NewTaskMessage("task1", nil)
m2 := asynqtest.NewTaskMessage("task2", nil) m2 := testutil.NewTaskMessage("task2", nil)
m3 := asynqtest.NewTaskMessage("task3", nil) m3 := testutil.NewTaskMessage("task3", nil)
msgs := []*base.TaskMessage{m1, m2, m3} msgs := []*base.TaskMessage{m1, m2, m3}
zs := []base.Z{ zs := []base.Z{
{Message: m1, Score: time.Now().Add(10 * time.Second).Unix()}, {Message: m1, Score: time.Now().Add(10 * time.Second).Unix()},
{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()}, {Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()}, {Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
} }
ctx := context.Background()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName) testutil.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName) testutil.SeedLease(b, r.client, zs, base.DefaultQueueName)
b.StartTimer() b.StartTimer()
if err := r.Requeue(msgs[0]); err != nil { if err := r.Requeue(ctx, msgs[0]); err != nil {
b.Fatalf("Requeue failed: %v", err) b.Fatalf("Requeue failed: %v", err)
} }
} }
@@ -247,7 +258,7 @@ func BenchmarkCheckAndEnqueue(b *testing.B) {
now := time.Now() now := time.Now()
var zs []base.Z var zs []base.Z
for i := -100; i < 100; i++ { for i := -100; i < 100; i++ {
msg := asynqtest.NewTaskMessage(fmt.Sprintf("task%d", i), nil) msg := testutil.NewTaskMessage(fmt.Sprintf("task%d", i), nil)
score := now.Add(time.Duration(i) * time.Second).Unix() score := now.Add(time.Duration(i) * time.Second).Unix()
zs = append(zs, base.Z{Message: msg, Score: score}) zs = append(zs, base.Z{Message: msg, Score: score})
} }
@@ -255,8 +266,8 @@ func BenchmarkCheckAndEnqueue(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
asynqtest.FlushDB(b, r.client) testutil.FlushDB(b, r.client)
asynqtest.SeedScheduledQueue(b, r.client, zs, base.DefaultQueueName) testutil.SeedScheduledQueue(b, r.client, zs, base.DefaultQueueName)
b.StartTimer() b.StartTimer()
if err := r.ForwardIfReady(base.DefaultQueueName); err != nil { if err := r.ForwardIfReady(base.DefaultQueueName); err != nil {

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -6,15 +6,16 @@
package testbroker package testbroker
import ( import (
"context"
"errors" "errors"
"sync" "sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
) )
var errRedisDown = errors.New("asynqtest: redis is down") var errRedisDown = errors.New("testutil: redis is down")
// TestBroker is a broker implementation that makes it possible // TestBroker is a broker implementation that makes it possible
// to simulate Redis failure in tests. // to simulate Redis failure in tests.
@@ -45,22 +46,22 @@ func (tb *TestBroker) Wakeup() {
tb.sleeping = false tb.sleeping = false
} }
func (tb *TestBroker) Enqueue(msg *base.TaskMessage) error { func (tb *TestBroker) Enqueue(ctx context.Context, msg *base.TaskMessage) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Enqueue(msg) return tb.real.Enqueue(ctx, msg)
} }
func (tb *TestBroker) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error { func (tb *TestBroker) EnqueueUnique(ctx context.Context, msg *base.TaskMessage, ttl time.Duration) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.EnqueueUnique(msg, ttl) return tb.real.EnqueueUnique(ctx, msg, ttl)
} }
func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, error) { func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, error) {
@@ -72,58 +73,67 @@ func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, e
return tb.real.Dequeue(qnames...) return tb.real.Dequeue(qnames...)
} }
func (tb *TestBroker) Done(msg *base.TaskMessage) error { func (tb *TestBroker) Done(ctx context.Context, msg *base.TaskMessage) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Done(msg) return tb.real.Done(ctx, msg)
} }
func (tb *TestBroker) Requeue(msg *base.TaskMessage) error { func (tb *TestBroker) MarkAsComplete(ctx context.Context, msg *base.TaskMessage) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Requeue(msg) return tb.real.MarkAsComplete(ctx, msg)
} }
func (tb *TestBroker) Schedule(msg *base.TaskMessage, processAt time.Time) error { func (tb *TestBroker) Requeue(ctx context.Context, msg *base.TaskMessage) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Schedule(msg, processAt) return tb.real.Requeue(ctx, msg)
} }
func (tb *TestBroker) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error { func (tb *TestBroker) Schedule(ctx context.Context, msg *base.TaskMessage, processAt time.Time) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.ScheduleUnique(msg, processAt, ttl) return tb.real.Schedule(ctx, msg, processAt)
} }
func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error { func (tb *TestBroker) ScheduleUnique(ctx context.Context, msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Retry(msg, processAt, errMsg) return tb.real.ScheduleUnique(ctx, msg, processAt, ttl)
} }
func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error { func (tb *TestBroker) Retry(ctx context.Context, msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Archive(msg, errMsg) return tb.real.Retry(ctx, msg, processAt, errMsg, isFailure)
}
func (tb *TestBroker) Archive(ctx context.Context, msg *base.TaskMessage, errMsg string) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.Archive(ctx, msg, errMsg)
} }
func (tb *TestBroker) ForwardIfReady(qnames ...string) error { func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
@@ -135,13 +145,31 @@ func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
return tb.real.ForwardIfReady(qnames...) return tb.real.ForwardIfReady(qnames...)
} }
func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) { func (tb *TestBroker) DeleteExpiredCompletedTasks(qname string) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.DeleteExpiredCompletedTasks(qname)
}
func (tb *TestBroker) ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*base.TaskMessage, error) {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return nil, errRedisDown return nil, errRedisDown
} }
return tb.real.ListDeadlineExceeded(deadline, qnames...) return tb.real.ListLeaseExpired(cutoff, qnames...)
}
func (tb *TestBroker) ExtendLease(qname string, ids ...string) (time.Time, error) {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return time.Time{}, errRedisDown
}
return tb.real.ExtendLease(qname, ids...)
} }
func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error { func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
@@ -180,6 +208,15 @@ func (tb *TestBroker) PublishCancelation(id string) error {
return tb.real.PublishCancelation(id) return tb.real.PublishCancelation(id)
} }
func (tb *TestBroker) WriteResult(qname, id string, data []byte) (int, error) {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return 0, errRedisDown
}
return tb.real.WriteResult(qname, id, data)
}
func (tb *TestBroker) Ping() error { func (tb *TestBroker) Ping() error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
@@ -197,3 +234,66 @@ func (tb *TestBroker) Close() error {
} }
return tb.real.Close() return tb.real.Close()
} }
func (tb *TestBroker) AddToGroup(ctx context.Context, msg *base.TaskMessage, gname string) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.AddToGroup(ctx, msg, gname)
}
func (tb *TestBroker) AddToGroupUnique(ctx context.Context, msg *base.TaskMessage, gname string, ttl time.Duration) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.AddToGroupUnique(ctx, msg, gname, ttl)
}
func (tb *TestBroker) ListGroups(qname string) ([]string, error) {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return nil, errRedisDown
}
return tb.real.ListGroups(qname)
}
func (tb *TestBroker) AggregationCheck(qname, gname string, t time.Time, gracePeriod, maxDelay time.Duration, maxSize int) (aggregationSetID string, err error) {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return "", errRedisDown
}
return tb.real.AggregationCheck(qname, gname, t, gracePeriod, maxDelay, maxSize)
}
func (tb *TestBroker) ReadAggregationSet(qname, gname, aggregationSetID string) ([]*base.TaskMessage, time.Time, error) {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return nil, time.Time{}, errRedisDown
}
return tb.real.ReadAggregationSet(qname, gname, aggregationSetID)
}
func (tb *TestBroker) DeleteAggregationSet(ctx context.Context, qname, gname, aggregationSetID string) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.DeleteAggregationSet(ctx, qname, gname, aggregationSetID)
}
func (tb *TestBroker) ReclaimStaleAggregationSets(qname string) error {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return errRedisDown
}
return tb.real.ReclaimStaleAggregationSets(qname)
}
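A minimal sketch of the failure-simulation flow this broker exists for, assuming the NewTestBroker constructor and the Sleep counterpart to the Wakeup method shown above (both in the full file, not in these hunks):

tb := testbroker.NewTestBroker(realBroker) // realBroker: any base.Broker, e.g. an *rdb.RDB
tb.Sleep()                                 // subsequent broker calls return errRedisDown
if err := tb.Enqueue(context.Background(), msg); err == nil { // msg: a *base.TaskMessage from test setup
	t.Fatal("want an error while the broker is down")
}
tb.Wakeup() // calls are delegated to the real broker again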

View File

@@ -0,0 +1,84 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package testutil
import (
"time"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
)
func makeDefaultTaskMessage() *base.TaskMessage {
return &base.TaskMessage{
ID: uuid.NewString(),
Type: "default_task",
Queue: "default",
Retry: 25,
Timeout: 1800, // default timeout of 30 mins
Deadline: 0, // no deadline
}
}
type TaskMessageBuilder struct {
msg *base.TaskMessage
}
func NewTaskMessageBuilder() *TaskMessageBuilder {
return &TaskMessageBuilder{}
}
func (b *TaskMessageBuilder) lazyInit() {
if b.msg == nil {
b.msg = makeDefaultTaskMessage()
}
}
func (b *TaskMessageBuilder) Build() *base.TaskMessage {
b.lazyInit()
return b.msg
}
func (b *TaskMessageBuilder) SetType(typename string) *TaskMessageBuilder {
b.lazyInit()
b.msg.Type = typename
return b
}
func (b *TaskMessageBuilder) SetPayload(payload []byte) *TaskMessageBuilder {
b.lazyInit()
b.msg.Payload = payload
return b
}
func (b *TaskMessageBuilder) SetQueue(qname string) *TaskMessageBuilder {
b.lazyInit()
b.msg.Queue = qname
return b
}
func (b *TaskMessageBuilder) SetRetry(n int) *TaskMessageBuilder {
b.lazyInit()
b.msg.Retry = n
return b
}
func (b *TaskMessageBuilder) SetTimeout(timeout time.Duration) *TaskMessageBuilder {
b.lazyInit()
b.msg.Timeout = int64(timeout.Seconds())
return b
}
func (b *TaskMessageBuilder) SetDeadline(deadline time.Time) *TaskMessageBuilder {
b.lazyInit()
b.msg.Deadline = deadline.Unix()
return b
}
func (b *TaskMessageBuilder) SetGroup(gname string) *TaskMessageBuilder {
b.lazyInit()
b.msg.GroupKey = gname
return b
}
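Usage note: thanks to lazyInit the zero value of TaskMessageBuilder is ready to use, and the setters chain. The task type, queue, and group names below are illustrative:

msg := NewTaskMessageBuilder().
	SetType("image:resize").
	SetQueue("critical").
	SetRetry(3).
	SetGroup("thumbnails").
	Build()
// Unset fields keep their defaults: Timeout stays 1800s, Deadline stays 0,
// and ID is a fresh UUID string.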

View File

@@ -0,0 +1,94 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package testutil
import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hibiken/asynq/internal/base"
)
func TestTaskMessageBuilder(t *testing.T) {
tests := []struct {
desc string
ops func(b *TaskMessageBuilder) // operations to perform on the builder
want *base.TaskMessage
}{
{
desc: "zero value and build",
ops: nil,
want: &base.TaskMessage{
Type: "default_task",
Queue: "default",
Payload: nil,
Retry: 25,
Timeout: 1800, // 30m
Deadline: 0,
},
},
{
desc: "with type, payload, and queue",
ops: func(b *TaskMessageBuilder) {
b.SetType("foo").SetPayload([]byte("hello")).SetQueue("myqueue")
},
want: &base.TaskMessage{
Type: "foo",
Queue: "myqueue",
Payload: []byte("hello"),
Retry: 25,
Timeout: 1800, // 30m
Deadline: 0,
},
},
{
desc: "with retry, timeout, and deadline",
ops: func(b *TaskMessageBuilder) {
b.SetRetry(1).
SetTimeout(20 * time.Second).
SetDeadline(time.Date(2017, 3, 6, 0, 0, 0, 0, time.UTC))
},
want: &base.TaskMessage{
Type: "default_task",
Queue: "default",
Payload: nil,
Retry: 1,
Timeout: 20,
Deadline: time.Date(2017, 3, 6, 0, 0, 0, 0, time.UTC).Unix(),
},
},
{
desc: "with group",
ops: func(b *TaskMessageBuilder) {
b.SetGroup("mygroup")
},
want: &base.TaskMessage{
Type: "default_task",
Queue: "default",
Payload: nil,
Retry: 25,
Timeout: 1800,
Deadline: 0,
GroupKey: "mygroup",
},
},
}
cmpOpts := []cmp.Option{cmpopts.IgnoreFields(base.TaskMessage{}, "ID")}
for _, tc := range tests {
var b TaskMessageBuilder
if tc.ops != nil {
tc.ops(&b)
}
got := b.Build()
if diff := cmp.Diff(tc.want, got, cmpOpts...); diff != "" {
t.Errorf("%s: TaskMessageBuilder.Build() = %+v, want %+v;\n(-want,+got)\n%s",
tc.desc, got, tc.want, diff)
}
}
}

View File

@@ -2,21 +2,23 @@
// Use of this source code is governed by a MIT license // Use of this source code is governed by a MIT license
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
// Package asynqtest defines test helpers for asynq and its internal packages. // Package testutil defines test helpers for asynq and its internal packages.
package asynqtest package testutil
import ( import (
"context"
"encoding/json" "encoding/json"
"math" "math"
"sort" "sort"
"testing" "testing"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts" "github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/timeutil"
) )
// EquateInt64Approx returns a Comparer option that treats int64 values // EquateInt64Approx returns a Comparer option that treats int64 values
@@ -31,7 +33,7 @@ func EquateInt64Approx(margin int64) cmp.Option {
var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage) []*base.TaskMessage { var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage) []*base.TaskMessage {
out := append([]*base.TaskMessage(nil), in...) // Copy input to avoid mutating it out := append([]*base.TaskMessage(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool { sort.Slice(out, func(i, j int) bool {
return out[i].ID.String() < out[j].ID.String() return out[i].ID < out[j].ID
}) })
return out return out
}) })
@@ -40,7 +42,7 @@ var SortMsgOpt = cmp.Transformer("SortTaskMessages", func(in []*base.TaskMessage
var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []base.Z) []base.Z { var SortZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []base.Z) []base.Z {
out := append([]base.Z(nil), in...) // Copy input to avoid mutating it out := append([]base.Z(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool { sort.Slice(out, func(i, j int) bool {
return out[i].Message.ID.String() < out[j].Message.ID.String() return out[i].Message.ID < out[j].Message.ID
}) })
return out return out
}) })
@@ -91,6 +93,20 @@ var SortStringSliceOpt = cmp.Transformer("SortStringSlice", func(in []string) []
return out return out
}) })
var SortRedisZSetEntryOpt = cmp.Transformer("SortZSetEntries", func(in []redis.Z) []redis.Z {
out := append([]redis.Z(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool {
// TODO: If member is a comparable type (int, string, etc.), compare by the member.
// Use a generic comparable type here once updated to Go 1.18.
if _, ok := out[i].Member.(string); ok {
// If member is a string, compare the member
return out[i].Member.(string) < out[j].Member.(string)
}
return out[i].Score < out[j].Score
})
return out
})
// IgnoreIDOpt is a cmp.Option to ignore the ID field in task messages when comparing. // IgnoreIDOpt is a cmp.Option to ignore the ID field in task messages when comparing.
var IgnoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID") var IgnoreIDOpt = cmpopts.IgnoreFields(base.TaskMessage{}, "ID")
@@ -103,7 +119,7 @@ func NewTaskMessage(taskType string, payload []byte) *base.TaskMessage {
// task type, payload and queue name. // task type, payload and queue name.
func NewTaskMessageWithQueue(taskType string, payload []byte, qname string) *base.TaskMessage { func NewTaskMessageWithQueue(taskType string, payload []byte, qname string) *base.TaskMessage {
return &base.TaskMessage{ return &base.TaskMessage{
ID: uuid.New(), ID: uuid.NewString(),
Type: taskType, Type: taskType,
Queue: qname, Queue: qname,
Retry: 25, Retry: 25,
@@ -113,6 +129,13 @@ func NewTaskMessageWithQueue(taskType string, payload []byte, qname string) *bas
} }
} }
// NewLeaseWithClock returns a new lease with the given expiration time and clock.
func NewLeaseWithClock(expirationTime time.Time, clock timeutil.Clock) *base.Lease {
l := base.NewLease(expirationTime)
l.Clock = clock
return l
}
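A hedged sketch of this helper with a simulated clock, assuming the timeutil package provides NewSimulatedClock and AdvanceTime and that base.Lease exposes IsValid (names assumed, not shown in these hunks):

now := time.Now()
clock := timeutil.NewSimulatedClock(now) // assumed constructor
l := NewLeaseWithClock(now.Add(30*time.Second), clock)
clock.AdvanceTime(31 * time.Second) // assumed mutator on the simulated clock
fmt.Println(l.IsValid())            // false once the clock passes the expiration time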
// JSON serializes the given key-value pairs into stream of bytes in JSON. // JSON serializes the given key-value pairs into stream of bytes in JSON.
func JSON(kv map[string]interface{}) []byte { func JSON(kv map[string]interface{}) []byte {
b, err := json.Marshal(kv) b, err := json.Marshal(kv)
@@ -138,6 +161,12 @@ func TaskMessageWithError(t base.TaskMessage, errMsg string, failedAt time.Time)
return &t return &t
} }
// TaskMessageWithCompletedAt returns an updated copy of t after completion.
func TaskMessageWithCompletedAt(t base.TaskMessage, completedAt time.Time) *base.TaskMessage {
t.CompletedAt = completedAt.Unix()
return &t
}
// MustMarshal marshals given task message and returns a json string. // MustMarshal marshals given task message and returns a json string.
// Calling test will fail if marshaling errors out. // Calling test will fail if marshaling errors out.
func MustMarshal(tb testing.TB, msg *base.TaskMessage) string { func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
@@ -165,12 +194,12 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
tb.Helper() tb.Helper()
switch r := r.(type) { switch r := r.(type) {
case *redis.Client: case *redis.Client:
if err := r.FlushDB().Err(); err != nil { if err := r.FlushDB(context.Background()).Err(); err != nil {
tb.Fatal(err) tb.Fatal(err)
} }
case *redis.ClusterClient: case *redis.ClusterClient:
err := r.ForEachMaster(func(c *redis.Client) error { err := r.ForEachMaster(context.Background(), func(ctx context.Context, c *redis.Client) error {
if err := c.FlushAll().Err(); err != nil { if err := c.FlushAll(ctx).Err(); err != nil {
return err return err
} }
return nil return nil
@@ -184,43 +213,65 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
// SeedPendingQueue initializes the specified queue with the given messages. // SeedPendingQueue initializes the specified queue with the given messages.
func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) { func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisList(tb, r, base.PendingKey(qname), msgs, base.TaskStatePending) seedRedisList(tb, r, base.PendingKey(qname), msgs, base.TaskStatePending)
} }
// SeedActiveQueue initializes the active queue with the given messages. // SeedActiveQueue initializes the active queue with the given messages.
func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) { func SeedActiveQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisList(tb, r, base.ActiveKey(qname), msgs, base.TaskStateActive) seedRedisList(tb, r, base.ActiveKey(qname), msgs, base.TaskStateActive)
} }
// SeedScheduledQueue initializes the scheduled queue with the given messages. // SeedScheduledQueue initializes the scheduled queue with the given messages.
func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) { func SeedScheduledQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.ScheduledKey(qname), entries, base.TaskStateScheduled) seedRedisZSet(tb, r, base.ScheduledKey(qname), entries, base.TaskStateScheduled)
} }
// SeedRetryQueue initializes the retry queue with the given messages. // SeedRetryQueue initializes the retry queue with the given messages.
func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) { func SeedRetryQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.RetryKey(qname), entries, base.TaskStateRetry) seedRedisZSet(tb, r, base.RetryKey(qname), entries, base.TaskStateRetry)
} }
// SeedArchivedQueue initializes the archived queue with the given messages. // SeedArchivedQueue initializes the archived queue with the given messages.
func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) { func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.ArchivedKey(qname), entries, base.TaskStateArchived) seedRedisZSet(tb, r, base.ArchivedKey(qname), entries, base.TaskStateArchived)
} }
// SeedDeadlines initializes the deadlines set with the given entries. // SeedLease initializes the lease set with the given entries.
func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) { func SeedLease(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper() tb.Helper()
r.SAdd(base.AllQueues, qname) r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries, base.TaskStateActive) seedRedisZSet(tb, r, base.LeaseKey(qname), entries, base.TaskStateActive)
}
// SeedCompletedQueue initializes the completed set with the given entries.
func SeedCompletedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
tb.Helper()
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.CompletedKey(qname), entries, base.TaskStateCompleted)
}
// SeedGroup initializes the group with the given entries.
func SeedGroup(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname, gname string) {
tb.Helper()
ctx := context.Background()
r.SAdd(ctx, base.AllQueues, qname)
r.SAdd(ctx, base.AllGroups(qname), gname)
seedRedisZSet(tb, r, base.GroupKey(qname, gname), entries, base.TaskStateAggregating)
}
func SeedAggregationSet(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname, gname, setID string) {
tb.Helper()
r.SAdd(context.Background(), base.AllQueues, qname)
seedRedisZSet(tb, r, base.AggregationSetKey(qname, gname, setID), entries, base.TaskStateAggregating)
} }
// SeedAllPendingQueues initializes all of the specified queues with the given messages.
@@ -265,11 +316,31 @@ func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[
	}
}

-// SeedAllDeadlines initializes all of the deadlines with the given entries.
-func SeedAllDeadlines(tb testing.TB, r redis.UniversalClient, deadlines map[string][]base.Z) {
+// SeedAllLease initializes all of the lease sets with the given entries.
+func SeedAllLease(tb testing.TB, r redis.UniversalClient, lease map[string][]base.Z) {
	tb.Helper()
-	for q, entries := range deadlines {
-		SeedDeadlines(tb, r, entries, q)
+	for q, entries := range lease {
+		SeedLease(tb, r, entries, q)
	}
}

+// SeedAllCompletedQueues initializes all of the completed queues with the given entries.
+func SeedAllCompletedQueues(tb testing.TB, r redis.UniversalClient, completed map[string][]base.Z) {
+	tb.Helper()
+	for q, entries := range completed {
+		SeedCompletedQueue(tb, r, entries, q)
+	}
+}
+
+// SeedAllGroups initializes all groups in all queues.
+// The groups map maps a queue name to group names, each of which maps to a list of
+// task messages and the time each message was added to the group.
+func SeedAllGroups(tb testing.TB, r redis.UniversalClient, groups map[string]map[string][]base.Z) {
+	tb.Helper()
+	for qname, g := range groups {
+		for gname, entries := range g {
+			SeedGroup(tb, r, entries, qname, gname)
+		}
+	}
+}
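
For illustration, a minimal sketch of how the group helpers above might be used from a test in the same package; the queue name, group name, and task type are made up for the example:

	// Hypothetical usage: seed two aggregating tasks into group "notify" on
	// queue "default", then read them back with GetGroupEntries.
	now := time.Now()
	m1 := NewTaskMessageWithQueue("email:digest", nil, "default")
	m2 := NewTaskMessageWithQueue("email:digest", nil, "default")
	SeedAllGroups(tb, r, map[string]map[string][]base.Z{
		"default": {
			"notify": {
				{Message: m1, Score: now.Unix()},
				{Message: m2, Score: now.Add(10 * time.Second).Unix()},
			},
		},
	})
	entries := GetGroupEntries(tb, r, "default", "notify")
	if len(entries) != 2 {
		tb.Errorf("got %d group entries, want 2", len(entries))
	}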
@@ -278,22 +349,21 @@ func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
	tb.Helper()
	for _, msg := range msgs {
		encoded := MustMarshal(tb, msg)
-		if err := c.LPush(key, msg.ID.String()).Err(); err != nil {
+		if err := c.LPush(context.Background(), key, msg.ID).Err(); err != nil {
			tb.Fatal(err)
		}
-		key := base.TaskKey(msg.Queue, msg.ID.String())
+		taskKey := base.TaskKey(msg.Queue, msg.ID)
		data := map[string]interface{}{
			"msg":        encoded,
			"state":      state.String(),
-			"timeout":    msg.Timeout,
-			"deadline":   msg.Deadline,
			"unique_key": msg.UniqueKey,
+			"group":      msg.GroupKey,
		}
-		if err := c.HSet(key, data).Err(); err != nil {
+		if err := c.HSet(context.Background(), taskKey, data).Err(); err != nil {
			tb.Fatal(err)
		}
		if len(msg.UniqueKey) > 0 {
-			err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
+			err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID, 1*time.Minute).Err()
			if err != nil {
				tb.Fatalf("Failed to set unique lock in redis: %v", err)
			}
@@ -307,23 +377,22 @@ func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
	for _, item := range items {
		msg := item.Message
		encoded := MustMarshal(tb, msg)
-		z := &redis.Z{Member: msg.ID.String(), Score: float64(item.Score)}
-		if err := c.ZAdd(key, z).Err(); err != nil {
+		z := &redis.Z{Member: msg.ID, Score: float64(item.Score)}
+		if err := c.ZAdd(context.Background(), key, z).Err(); err != nil {
			tb.Fatal(err)
		}
-		key := base.TaskKey(msg.Queue, msg.ID.String())
+		taskKey := base.TaskKey(msg.Queue, msg.ID)
		data := map[string]interface{}{
			"msg":        encoded,
			"state":      state.String(),
-			"timeout":    msg.Timeout,
-			"deadline":   msg.Deadline,
			"unique_key": msg.UniqueKey,
+			"group":      msg.GroupKey,
		}
-		if err := c.HSet(key, data).Err(); err != nil {
+		if err := c.HSet(context.Background(), taskKey, data).Err(); err != nil {
			tb.Fatal(err)
		}
		if len(msg.UniqueKey) > 0 {
-			err := c.SetNX(msg.UniqueKey, msg.ID.String(), 1*time.Minute).Err()
+			err := c.SetNX(context.Background(), msg.UniqueKey, msg.ID, 1*time.Minute).Err()
			if err != nil {
				tb.Fatalf("Failed to set unique lock in redis: %v", err)
			}
@@ -366,6 +435,13 @@ func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) [
	return getMessagesFromZSet(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
}
// GetCompletedMessages returns all completed task messages in the given queue.
// It also asserts the state field of the task.
func GetCompletedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
tb.Helper()
return getMessagesFromZSet(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
}
// GetScheduledEntries returns all scheduled messages and their scores in the given queue.
// It also asserts the state field of the task.
func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
@@ -387,24 +463,39 @@ func GetArchivedEntries(tb testing.TB, r redis.UniversalClient, qname string) []
	return getMessagesFromZSetWithScores(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
}

-// GetDeadlinesEntries returns all task messages and its score in the deadlines set for the given queue.
+// GetLeaseEntries returns all task IDs and their scores in the lease set for the given queue.
// It also asserts the state field of the task.
-func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+func GetLeaseEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
	tb.Helper()
-	return getMessagesFromZSetWithScores(tb, r, qname, base.DeadlinesKey, base.TaskStateActive)
+	return getMessagesFromZSetWithScores(tb, r, qname, base.LeaseKey, base.TaskStateActive)
}
// GetCompletedEntries returns all completed messages and their scores in the given queue.
// It also asserts the state field of the task.
func GetCompletedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
tb.Helper()
return getMessagesFromZSetWithScores(tb, r, qname, base.CompletedKey, base.TaskStateCompleted)
}
// GetGroupEntries returns all messages and their scores in the given group.
// It also asserts the state field of the task.
func GetGroupEntries(tb testing.TB, r redis.UniversalClient, qname, groupKey string) []base.Z {
tb.Helper()
return getMessagesFromZSetWithScores(tb, r, qname,
func(qname string) string { return base.GroupKey(qname, groupKey) }, base.TaskStateAggregating)
} }
// Retrieves all messages stored under `keyFn(qname)` key in redis list.
func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
	keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
	tb.Helper()
-	ids := r.LRange(keyFn(qname), 0, -1).Val()
+	ids := r.LRange(context.Background(), keyFn(qname), 0, -1).Val()
	var msgs []*base.TaskMessage
	for _, id := range ids {
		taskKey := base.TaskKey(qname, id)
-		data := r.HGet(taskKey, "msg").Val()
+		data := r.HGet(context.Background(), taskKey, "msg").Val()
		msgs = append(msgs, MustUnmarshal(tb, data))
-		if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
+		if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
			tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
		}
	}
@@ -415,13 +506,13 @@ func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string,
func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
	keyFn func(qname string) string, state base.TaskState) []*base.TaskMessage {
	tb.Helper()
-	ids := r.ZRange(keyFn(qname), 0, -1).Val()
+	ids := r.ZRange(context.Background(), keyFn(qname), 0, -1).Val()
	var msgs []*base.TaskMessage
	for _, id := range ids {
		taskKey := base.TaskKey(qname, id)
-		msg := r.HGet(taskKey, "msg").Val()
+		msg := r.HGet(context.Background(), taskKey, "msg").Val()
		msgs = append(msgs, MustUnmarshal(tb, msg))
-		if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
+		if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
			tb.Errorf("task (id=%q) is in %q state, want %v", id, gotState, state)
		}
	}
@@ -432,16 +523,120 @@ func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string,
func getMessagesFromZSetWithScores(tb testing.TB, r redis.UniversalClient,
	qname string, keyFn func(qname string) string, state base.TaskState) []base.Z {
	tb.Helper()
-	zs := r.ZRangeWithScores(keyFn(qname), 0, -1).Val()
+	zs := r.ZRangeWithScores(context.Background(), keyFn(qname), 0, -1).Val()
	var res []base.Z
	for _, z := range zs {
		taskID := z.Member.(string)
		taskKey := base.TaskKey(qname, taskID)
-		msg := r.HGet(taskKey, "msg").Val()
+		msg := r.HGet(context.Background(), taskKey, "msg").Val()
		res = append(res, base.Z{Message: MustUnmarshal(tb, msg), Score: int64(z.Score)})
-		if gotState := r.HGet(taskKey, "state").Val(); gotState != state.String() {
+		if gotState := r.HGet(context.Background(), taskKey, "state").Val(); gotState != state.String() {
			tb.Errorf("task (id=%q) is in %q state, want %v", taskID, gotState, state)
		}
	}
	return res
}
// TaskSeedData holds the data required to seed tasks under the task key in test.
type TaskSeedData struct {
Msg *base.TaskMessage
State base.TaskState
PendingSince time.Time
}
// SeedTasks seeds the task hash (and any unique lock) for each entry in taskData.
func SeedTasks(tb testing.TB, r redis.UniversalClient, taskData []*TaskSeedData) {
for _, data := range taskData {
msg := data.Msg
ctx := context.Background()
key := base.TaskKey(msg.Queue, msg.ID)
v := map[string]interface{}{
"msg": MustMarshal(tb, msg),
"state": data.State.String(),
"unique_key": msg.UniqueKey,
"group": msg.GroupKey,
}
if !data.PendingSince.IsZero() {
v["pending_since"] = data.PendingSince.Unix()
}
if err := r.HSet(ctx, key, v).Err(); err != nil {
tb.Fatalf("Failed to write task data in redis: %v", err)
}
if len(msg.UniqueKey) > 0 {
err := r.SetNX(ctx, msg.UniqueKey, msg.ID, 1*time.Minute).Err()
if err != nil {
tb.Fatalf("Failed to set unique lock in redis: %v", err)
}
}
}
}
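
A quick sketch of SeedTasks in use; the task type and timestamp are arbitrary examples:

	// Hypothetical usage: write one pending task hash directly, without
	// touching any queue list or zset.
	msg := NewTaskMessageWithQueue("image:resize", nil, "default")
	SeedTasks(tb, r, []*TaskSeedData{
		{Msg: msg, State: base.TaskStatePending, PendingSince: time.Now()},
	})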
// SeedRedisZSets seeds each given redis key with the given sorted-set members.
func SeedRedisZSets(tb testing.TB, r redis.UniversalClient, zsets map[string][]*redis.Z) {
for key, zs := range zsets {
// FIXME: How come we can't simply do ZAdd(ctx, key, zs...) here?
for _, z := range zs {
if err := r.ZAdd(context.Background(), key, z).Err(); err != nil {
tb.Fatalf("Failed to seed zset (key=%q): %v", key, err)
}
}
}
}
// SeedRedisSets seeds each given redis key with the given set members.
func SeedRedisSets(tb testing.TB, r redis.UniversalClient, sets map[string][]string) {
for key, set := range sets {
SeedRedisSet(tb, r, key, set)
}
}
// SeedRedisSet seeds the given redis key with the given set members.
func SeedRedisSet(tb testing.TB, r redis.UniversalClient, key string, members []string) {
for _, mem := range members {
if err := r.SAdd(context.Background(), key, mem).Err(); err != nil {
tb.Fatalf("Failed to seed set (key=%q): %v", key, err)
}
}
}
// SeedRedisLists seeds each given redis key with the given list values.
func SeedRedisLists(tb testing.TB, r redis.UniversalClient, lists map[string][]string) {
for key, vals := range lists {
for _, v := range vals {
if err := r.LPush(context.Background(), key, v).Err(); err != nil {
tb.Fatalf("Failed to seed list (key=%q): %v", key, err)
}
}
}
}
// AssertRedisLists asserts that each given key holds exactly the given list values.
func AssertRedisLists(t *testing.T, r redis.UniversalClient, wantLists map[string][]string) {
for key, want := range wantLists {
got, err := r.LRange(context.Background(), key, 0, -1).Result()
if err != nil {
t.Fatalf("Failed to read list (key=%q): %v", key, err)
}
if diff := cmp.Diff(want, got, SortStringSliceOpt); diff != "" {
t.Errorf("mismatch found in list (key=%q): (-want,+got)\n%s", key, diff)
}
}
}
// AssertRedisSets asserts that each given key holds exactly the given set members.
func AssertRedisSets(t *testing.T, r redis.UniversalClient, wantSets map[string][]string) {
for key, want := range wantSets {
got, err := r.SMembers(context.Background(), key).Result()
if err != nil {
t.Fatalf("Failed to read set (key=%q): %v", key, err)
}
if diff := cmp.Diff(want, got, SortStringSliceOpt); diff != "" {
t.Errorf("mismatch found in set (key=%q): (-want,+got)\n%s", key, diff)
}
}
}
// AssertRedisZSets asserts that each given key holds exactly the given sorted-set entries.
func AssertRedisZSets(t *testing.T, r redis.UniversalClient, wantZSets map[string][]redis.Z) {
for key, want := range wantZSets {
got, err := r.ZRangeWithScores(context.Background(), key, 0, -1).Result()
if err != nil {
t.Fatalf("Failed to read zset (key=%q): %v", key, err)
}
if diff := cmp.Diff(want, got, SortRedisZSetEntryOpt); diff != "" {
t.Errorf("mismatch found in zset (key=%q): (-want,+got)\n%s", key, diff)
}
}
}
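
Because the raw seed and assert helpers share the same map shapes, a test can round-trip the same fixture, as in this small sketch (the keys are arbitrary examples):

	// Hypothetical usage: seed raw redis sets, then assert the same contents.
	sets := map[string][]string{
		"asynq:queues": {"default", "critical"},
	}
	SeedRedisSets(t, r, sets)
	AssertRedisSets(t, r, sets)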


@@ -0,0 +1,59 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Package timeutil exports functions and types related to time and date.
package timeutil
import (
"sync"
"time"
)
// A Clock is an object that can tell you the current time.
//
// This interface allows decoupling code that uses time from the code that creates
// a point in time. You can use this to your advantage by injecting Clocks into interfaces
// rather than having implementations call time.Now() directly.
//
// Use NewRealClock() in production.
// Use NewSimulatedClock() in tests.
type Clock interface {
Now() time.Time
}
func NewRealClock() Clock { return &realTimeClock{} }
type realTimeClock struct{}
func (_ *realTimeClock) Now() time.Time { return time.Now() }
// A SimulatedClock is a concrete Clock implementation that doesn't "tick" on its own.
// Time is advanced only by an explicit call to the AdvanceTime() or SetTime() methods.
// This object is safe for concurrent use.
type SimulatedClock struct {
mu sync.Mutex
t time.Time // guarded by mu
}
func NewSimulatedClock(t time.Time) *SimulatedClock {
return &SimulatedClock{t: t}
}
func (c *SimulatedClock) Now() time.Time {
c.mu.Lock()
defer c.mu.Unlock()
return c.t
}
func (c *SimulatedClock) SetTime(t time.Time) {
c.mu.Lock()
defer c.mu.Unlock()
c.t = t
}
func (c *SimulatedClock) AdvanceTime(d time.Duration) {
c.mu.Lock()
defer c.mu.Unlock()
c.t = c.t.Add(d)
}
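
A minimal sketch of the intended injection pattern, assuming a caller-defined type that holds a Clock instead of calling time.Now() directly:

	// issuer is a hypothetical component that depends on timeutil.Clock.
	type issuer struct {
		clock timeutil.Clock
	}

	func (i *issuer) issuedAt() time.Time { return i.clock.Now() }

	// In production: &issuer{clock: timeutil.NewRealClock()}
	// In tests, time becomes deterministic:
	//	c := timeutil.NewSimulatedClock(time.Unix(1650000000, 0))
	//	i := &issuer{clock: c}
	//	c.AdvanceTime(5 * time.Minute)
	//	i.issuedAt() // exactly 5 minutes past the initial time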


@@ -0,0 +1,48 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package timeutil
import (
"testing"
"time"
)
func TestSimulatedClock(t *testing.T) {
now := time.Now()
tests := []struct {
desc string
initTime time.Time
advanceBy time.Duration
wantTime time.Time
}{
{
desc: "advance time forward",
initTime: now,
advanceBy: 30 * time.Second,
wantTime: now.Add(30 * time.Second),
},
{
desc: "advance time backward",
initTime: now,
advanceBy: -10 * time.Second,
wantTime: now.Add(-10 * time.Second),
},
}
for _, tc := range tests {
c := NewSimulatedClock(tc.initTime)
if c.Now() != tc.initTime {
t.Errorf("%s: Before Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.initTime)
}
c.AdvanceTime(tc.advanceBy)
if c.Now() != tc.wantTime {
t.Errorf("%s: After Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.wantTime)
}
}
}

janitor.go (new file, 81 lines)

@@ -0,0 +1,81 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// A janitor is responsible for deleting expired completed tasks from the specified
// queues. It periodically checks for any expired tasks in the completed set, and
// deletes them.
type janitor struct {
logger *log.Logger
broker base.Broker
	// channel to communicate back to the long-running "janitor" goroutine.
done chan struct{}
// list of queue names to check.
queues []string
// average interval between checks.
avgInterval time.Duration
}
type janitorParams struct {
logger *log.Logger
broker base.Broker
queues []string
interval time.Duration
}
func newJanitor(params janitorParams) *janitor {
return &janitor{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
queues: params.queues,
avgInterval: params.interval,
}
}
func (j *janitor) shutdown() {
j.logger.Debug("Janitor shutting down...")
// Signal the janitor goroutine to stop.
j.done <- struct{}{}
}
// start starts the "janitor" goroutine.
func (j *janitor) start(wg *sync.WaitGroup) {
wg.Add(1)
timer := time.NewTimer(j.avgInterval) // randomize this interval with margin of 1s
go func() {
defer wg.Done()
for {
select {
case <-j.done:
j.logger.Debug("Janitor done")
return
case <-timer.C:
j.exec()
timer.Reset(j.avgInterval)
}
}
}()
}
func (j *janitor) exec() {
for _, qname := range j.queues {
if err := j.broker.DeleteExpiredCompletedTasks(qname); err != nil {
j.logger.Errorf("Failed to delete expired completed tasks from queue %q: %v",
qname, err)
}
}
}
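
The comment in start() above notes that the check interval should eventually be randomized with a margin of one second; that is not implemented here, but a possible sketch looks like this (randomizedInterval is a hypothetical helper, not part of the codebase):

	// randomizedInterval returns avg perturbed by up to ±margin so that
	// multiple janitor goroutines do not all wake up in lockstep.
	func randomizedInterval(avg, margin time.Duration) time.Duration {
		delta := time.Duration(rand.Int63n(int64(2*margin))) - margin // needs "math/rand"
		return avg + delta
	}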

janitor_test.go (new file, 89 lines)

@@ -0,0 +1,89 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb"
h "github.com/hibiken/asynq/internal/testutil"
)
func newCompletedTask(qname, tasktype string, payload []byte, completedAt time.Time) *base.TaskMessage {
msg := h.NewTaskMessageWithQueue(tasktype, payload, qname)
msg.CompletedAt = completedAt.Unix()
return msg
}
func TestJanitor(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
const interval = 1 * time.Second
janitor := newJanitor(janitorParams{
logger: testLogger,
broker: rdbClient,
queues: []string{"default", "custom"},
interval: interval,
})
now := time.Now()
hourAgo := now.Add(-1 * time.Hour)
minuteAgo := now.Add(-1 * time.Minute)
halfHourAgo := now.Add(-30 * time.Minute)
halfHourFromNow := now.Add(30 * time.Minute)
fiveMinFromNow := now.Add(5 * time.Minute)
msg1 := newCompletedTask("default", "task1", nil, hourAgo)
msg2 := newCompletedTask("default", "task2", nil, minuteAgo)
msg3 := newCompletedTask("custom", "task3", nil, hourAgo)
msg4 := newCompletedTask("custom", "task4", nil, minuteAgo)
tests := []struct {
completed map[string][]base.Z // initial completed sets
wantCompleted map[string][]base.Z // expected completed sets after janitor runs
}{
{
completed: map[string][]base.Z{
"default": {
{Message: msg1, Score: halfHourAgo.Unix()},
{Message: msg2, Score: fiveMinFromNow.Unix()},
},
"custom": {
{Message: msg3, Score: halfHourFromNow.Unix()},
{Message: msg4, Score: minuteAgo.Unix()},
},
},
wantCompleted: map[string][]base.Z{
"default": {
{Message: msg2, Score: fiveMinFromNow.Unix()},
},
"custom": {
{Message: msg3, Score: halfHourFromNow.Unix()},
},
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllCompletedQueues(t, r, tc.completed)
var wg sync.WaitGroup
janitor.start(&wg)
time.Sleep(2 * interval) // make sure to let janitor run at least one time
janitor.shutdown()
for qname, want := range tc.wantCompleted {
got := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, got, h.SortZSetEntryOpt); diff != "" {
t.Errorf("diff found in %q after running janitor: (-want, +got)\n%s", base.CompletedKey(qname), diff)
}
}
}
}

periodic_task_manager.go (new file, 243 lines)

@@ -0,0 +1,243 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"crypto/sha256"
"fmt"
"io"
"sort"
"sync"
"time"
)
// PeriodicTaskManager manages scheduling of periodic tasks.
// It syncs the scheduler's entries by periodically calling the config provider.
type PeriodicTaskManager struct {
s *Scheduler
p PeriodicTaskConfigProvider
syncInterval time.Duration
	done chan struct{}
wg sync.WaitGroup
m map[string]string // map[hash]entryID
}
type PeriodicTaskManagerOpts struct {
	// Required: must be non-nil
PeriodicTaskConfigProvider PeriodicTaskConfigProvider
	// Required: must be non-nil
RedisConnOpt RedisConnOpt
// Optional: scheduler options
*SchedulerOpts
// Optional: default is 3m
SyncInterval time.Duration
}
const defaultSyncInterval = 3 * time.Minute
// NewPeriodicTaskManager returns a new PeriodicTaskManager instance.
// The given opts should specify the RedisConnOpt and PeriodicTaskConfigProvider at minimum.
func NewPeriodicTaskManager(opts PeriodicTaskManagerOpts) (*PeriodicTaskManager, error) {
if opts.PeriodicTaskConfigProvider == nil {
return nil, fmt.Errorf("PeriodicTaskConfigProvider cannot be nil")
}
if opts.RedisConnOpt == nil {
return nil, fmt.Errorf("RedisConnOpt cannot be nil")
}
scheduler := NewScheduler(opts.RedisConnOpt, opts.SchedulerOpts)
syncInterval := opts.SyncInterval
if syncInterval == 0 {
syncInterval = defaultSyncInterval
}
return &PeriodicTaskManager{
s: scheduler,
p: opts.PeriodicTaskConfigProvider,
syncInterval: syncInterval,
done: make(chan struct{}),
m: make(map[string]string),
}, nil
}
// PeriodicTaskConfigProvider provides configs for periodic tasks.
// GetConfigs will be called by a PeriodicTaskManager periodically to
// sync the scheduler's entries with the configs returned by the provider.
type PeriodicTaskConfigProvider interface {
GetConfigs() ([]*PeriodicTaskConfig, error)
}
// PeriodicTaskConfig specifies the details of a periodic task.
type PeriodicTaskConfig struct {
	Cronspec string // required: must be a non-empty string
Task *Task // required: must be non nil
Opts []Option // optional: can be nil
}
func (c *PeriodicTaskConfig) hash() string {
h := sha256.New()
io.WriteString(h, c.Cronspec)
io.WriteString(h, c.Task.Type())
h.Write(c.Task.Payload())
opts := stringifyOptions(c.Opts)
sort.Strings(opts)
for _, opt := range opts {
io.WriteString(h, opt)
}
return fmt.Sprintf("%x", h.Sum(nil))
}
func validatePeriodicTaskConfig(c *PeriodicTaskConfig) error {
if c == nil {
return fmt.Errorf("PeriodicTaskConfig cannot be nil")
}
if c.Task == nil {
return fmt.Errorf("PeriodicTaskConfig.Task cannot be nil")
}
if c.Cronspec == "" {
return fmt.Errorf("PeriodicTaskConfig.Cronspec cannot be empty")
}
return nil
}
// Start starts a scheduler and background goroutine to sync the scheduler with the configs
// returned by the provider.
//
// Start returns any error encountered at start up time.
func (mgr *PeriodicTaskManager) Start() error {
if mgr.s == nil || mgr.p == nil {
panic("asynq: cannot start uninitialized PeriodicTaskManager; use NewPeriodicTaskManager to initialize")
}
if err := mgr.initialSync(); err != nil {
return fmt.Errorf("asynq: %v", err)
}
if err := mgr.s.Start(); err != nil {
return fmt.Errorf("asynq: %v", err)
}
mgr.wg.Add(1)
go func() {
defer mgr.wg.Done()
ticker := time.NewTicker(mgr.syncInterval)
for {
select {
case <-mgr.done:
mgr.s.logger.Debugf("Stopping syncer goroutine")
ticker.Stop()
return
case <-ticker.C:
mgr.sync()
}
}
}()
return nil
}
// Shutdown gracefully shuts down the manager.
// It notifies the background syncer goroutine to stop and stops the scheduler.
func (mgr *PeriodicTaskManager) Shutdown() {
close(mgr.done)
mgr.wg.Wait()
mgr.s.Shutdown()
}
// Run starts the manager and blocks until an os signal to exit the program is received.
// Once it receives a signal, it gracefully shuts down the manager.
func (mgr *PeriodicTaskManager) Run() error {
if err := mgr.Start(); err != nil {
return err
}
mgr.s.waitForSignals()
mgr.Shutdown()
mgr.s.logger.Debugf("PeriodicTaskManager exiting")
return nil
}
func (mgr *PeriodicTaskManager) initialSync() error {
configs, err := mgr.p.GetConfigs()
if err != nil {
return fmt.Errorf("initial call to GetConfigs failed: %v", err)
}
for _, c := range configs {
if err := validatePeriodicTaskConfig(c); err != nil {
return fmt.Errorf("initial call to GetConfigs contained an invalid config: %v", err)
}
}
mgr.add(configs)
return nil
}
func (mgr *PeriodicTaskManager) add(configs []*PeriodicTaskConfig) {
for _, c := range configs {
entryID, err := mgr.s.Register(c.Cronspec, c.Task, c.Opts...)
if err != nil {
mgr.s.logger.Errorf("Failed to register periodic task: cronspec=%q task=%q",
c.Cronspec, c.Task.Type())
continue
}
mgr.m[c.hash()] = entryID
mgr.s.logger.Infof("Successfully registered periodic task: cronspec=%q task=%q, entryID=%s",
c.Cronspec, c.Task.Type(), entryID)
}
}
func (mgr *PeriodicTaskManager) remove(removed map[string]string) {
for hash, entryID := range removed {
if err := mgr.s.Unregister(entryID); err != nil {
mgr.s.logger.Errorf("Failed to unregister periodic task: %v", err)
continue
}
delete(mgr.m, hash)
mgr.s.logger.Infof("Successfully unregistered periodic task: entryID=%s", entryID)
}
}
func (mgr *PeriodicTaskManager) sync() {
configs, err := mgr.p.GetConfigs()
if err != nil {
mgr.s.logger.Errorf("Failed to get periodic task configs: %v", err)
return
}
for _, c := range configs {
if err := validatePeriodicTaskConfig(c); err != nil {
mgr.s.logger.Errorf("Failed to sync: GetConfigs returned an invalid config: %v", err)
return
}
}
// Diff and only register/unregister the newly added/removed entries.
removed := mgr.diffRemoved(configs)
added := mgr.diffAdded(configs)
mgr.remove(removed)
mgr.add(added)
}
// diffRemoved diffs the incoming configs with the registered configs and returns
// a map containing the hash and entryID of each config that was removed.
func (mgr *PeriodicTaskManager) diffRemoved(configs []*PeriodicTaskConfig) map[string]string {
newConfigs := make(map[string]string)
for _, c := range configs {
newConfigs[c.hash()] = "" // empty value since we don't have entryID yet
}
removed := make(map[string]string)
for k, v := range mgr.m {
// test whether existing config is present in the incoming configs
if _, found := newConfigs[k]; !found {
removed[k] = v
}
}
return removed
}
// diffAdded diffs the incoming configs with the registered configs and returns
// a list of configs that were added.
func (mgr *PeriodicTaskManager) diffAdded(configs []*PeriodicTaskConfig) []*PeriodicTaskConfig {
var added []*PeriodicTaskConfig
for _, c := range configs {
if _, found := mgr.m[c.hash()]; !found {
added = append(added, c)
}
}
return added
}
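
To see how the pieces above fit together, a hedged usage sketch: a provider backed by a static slice, wired into a manager. The Redis address, cronspec, and task type are placeholders:

	package main

	import (
		"log"
		"time"

		"github.com/hibiken/asynq"
	)

	// staticProvider is a hypothetical PeriodicTaskConfigProvider that always
	// returns the same slice of configs.
	type staticProvider struct{ cfgs []*asynq.PeriodicTaskConfig }

	func (p *staticProvider) GetConfigs() ([]*asynq.PeriodicTaskConfig, error) {
		return p.cfgs, nil
	}

	func main() {
		provider := &staticProvider{cfgs: []*asynq.PeriodicTaskConfig{
			{Cronspec: "*/5 * * * *", Task: asynq.NewTask("example:cleanup", nil)},
		}}
		mgr, err := asynq.NewPeriodicTaskManager(asynq.PeriodicTaskManagerOpts{
			RedisConnOpt:               asynq.RedisClientOpt{Addr: "localhost:6379"},
			PeriodicTaskConfigProvider: provider,
			SyncInterval:               time.Minute,
		})
		if err != nil {
			log.Fatal(err)
		}
		if err := mgr.Run(); err != nil { // blocks until an exit signal arrives
			log.Fatal(err)
		}
	}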


@@ -0,0 +1,343 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sort"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
)
// Trivial implementation of PeriodicTaskConfigProvider for testing purposes.
type FakeConfigProvider struct {
mu sync.Mutex
cfgs []*PeriodicTaskConfig
}
func (p *FakeConfigProvider) SetConfigs(cfgs []*PeriodicTaskConfig) {
p.mu.Lock()
defer p.mu.Unlock()
p.cfgs = cfgs
}
func (p *FakeConfigProvider) GetConfigs() ([]*PeriodicTaskConfig, error) {
p.mu.Lock()
defer p.mu.Unlock()
return p.cfgs, nil
}
func TestNewPeriodicTaskManager(t *testing.T) {
cfgs := []*PeriodicTaskConfig{
{Cronspec: "* * * * *", Task: NewTask("foo", nil)},
{Cronspec: "* * * * *", Task: NewTask("bar", nil)},
}
tests := []struct {
desc string
opts PeriodicTaskManagerOpts
}{
{
desc: "with provider and redisConnOpt",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
},
},
{
desc: "with sync option",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
SyncInterval: 5 * time.Minute,
},
},
{
desc: "with scheduler option",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
SyncInterval: 5 * time.Minute,
SchedulerOpts: &SchedulerOpts{
LogLevel: DebugLevel,
},
},
},
}
for _, tc := range tests {
_, err := NewPeriodicTaskManager(tc.opts)
if err != nil {
t.Errorf("%s; NewPeriodicTaskManager returned error: %v", tc.desc, err)
}
}
}
func TestNewPeriodicTaskManagerError(t *testing.T) {
cfgs := []*PeriodicTaskConfig{
{Cronspec: "* * * * *", Task: NewTask("foo", nil)},
{Cronspec: "* * * * *", Task: NewTask("bar", nil)},
}
tests := []struct {
desc string
opts PeriodicTaskManagerOpts
}{
{
desc: "without provider",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
},
},
{
desc: "without redisConOpt",
opts: PeriodicTaskManagerOpts{
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
},
},
}
for _, tc := range tests {
_, err := NewPeriodicTaskManager(tc.opts)
if err == nil {
t.Errorf("%s; NewPeriodicTaskManager did not return error", tc.desc)
}
}
}
func TestPeriodicTaskConfigHash(t *testing.T) {
tests := []struct {
desc string
a *PeriodicTaskConfig
b *PeriodicTaskConfig
isSame bool
}{
{
desc: "basic identity test",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
isSame: true,
},
{
desc: "with a option",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
isSame: true,
},
{
desc: "with multiple options (different order)",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Unique(5 * time.Minute), Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue"), Unique(5 * time.Minute)},
},
isSame: true,
},
{
desc: "with payload",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("hello world!")),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("hello world!")),
Opts: []Option{Queue("myqueue")},
},
isSame: true,
},
{
desc: "with different cronspecs",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
b: &PeriodicTaskConfig{
Cronspec: "5 * * * *",
Task: NewTask("foo", nil),
},
isSame: false,
},
{
desc: "with different task type",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("bar", nil),
},
isSame: false,
},
{
desc: "with different options",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Unique(10 * time.Minute)},
},
isSame: false,
},
{
desc: "with different options (one is subset of the other)",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue"), Unique(10 * time.Minute)},
},
isSame: false,
},
{
desc: "with different payload",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("hello!")),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("HELLO!")),
Opts: []Option{Queue("myqueue"), Unique(10 * time.Minute)},
},
isSame: false,
},
}
for _, tc := range tests {
if tc.isSame && tc.a.hash() != tc.b.hash() {
t.Errorf("%s: a.hash=%s b.hash=%s expected to be equal",
tc.desc, tc.a.hash(), tc.b.hash())
}
if !tc.isSame && tc.a.hash() == tc.b.hash() {
t.Errorf("%s: a.hash=%s b.hash=%s expected to be not equal",
tc.desc, tc.a.hash(), tc.b.hash())
}
}
}
// Things to test.
// - Run the manager
// - Change provider to return new configs
// - Verify that the scheduler synced with the new config
func TestPeriodicTaskManager(t *testing.T) {
// Note: In this test, we'll use task type as an ID for each config.
cfgs := []*PeriodicTaskConfig{
{Task: NewTask("task1", nil), Cronspec: "* * * * 1"},
{Task: NewTask("task2", nil), Cronspec: "* * * * 2"},
}
const syncInterval = 3 * time.Second
provider := &FakeConfigProvider{cfgs: cfgs}
mgr, err := NewPeriodicTaskManager(PeriodicTaskManagerOpts{
RedisConnOpt: getRedisConnOpt(t),
PeriodicTaskConfigProvider: provider,
SyncInterval: syncInterval,
})
if err != nil {
t.Fatalf("Failed to initialize PeriodicTaskManager: %v", err)
}
if err := mgr.Start(); err != nil {
t.Fatalf("Failed to start PeriodicTaskManager: %v", err)
}
defer mgr.Shutdown()
got := extractCronEntries(mgr.s)
want := []*cronEntry{
{Cronspec: "* * * * 1", TaskType: "task1"},
{Cronspec: "* * * * 2", TaskType: "task2"},
}
if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
t.Errorf("Diff found in scheduler's registered entries: %s", diff)
}
// Change the underlying configs
// - task2 removed
// - task3 added
provider.SetConfigs([]*PeriodicTaskConfig{
{Task: NewTask("task1", nil), Cronspec: "* * * * 1"},
{Task: NewTask("task3", nil), Cronspec: "* * * * 3"},
})
// Wait for the next sync
time.Sleep(syncInterval * 2)
// Verify the entries are synced
got = extractCronEntries(mgr.s)
want = []*cronEntry{
{Cronspec: "* * * * 1", TaskType: "task1"},
{Cronspec: "* * * * 3", TaskType: "task3"},
}
if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
t.Errorf("Diff found in scheduler's registered entries: %s", diff)
}
// Change the underlying configs
// All configs removed, empty set.
provider.SetConfigs([]*PeriodicTaskConfig{})
// Wait for the next sync
time.Sleep(syncInterval * 2)
// Verify the entries are synced
got = extractCronEntries(mgr.s)
want = []*cronEntry{}
if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
t.Errorf("Diff found in scheduler's registered entries: %s", diff)
}
}
func extractCronEntries(s *Scheduler) []*cronEntry {
var out []*cronEntry
for _, e := range s.cron.Entries() {
job := e.Job.(*enqueueJob)
out = append(out, &cronEntry{Cronspec: job.cronspec, TaskType: job.task.Type()})
}
return out
}
var sortCronEntry = cmp.Transformer("sortCronEntry", func(in []*cronEntry) []*cronEntry {
out := append([]*cronEntry(nil), in...)
sort.Slice(out, func(i, j int) bool {
return out[i].TaskType < out[j].TaskType
})
return out
})
// A simple struct to allow for simpler comparison in test.
type cronEntry struct {
Cronspec string
TaskType string
}


@@ -7,6 +7,7 @@ package asynq
import (
	"context"
	"fmt"
+	"math"
	"math/rand"
	"runtime"
	"runtime/debug"
@@ -16,16 +17,20 @@ import (
"time" "time"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
asynqcontext "github.com/hibiken/asynq/internal/context"
"github.com/hibiken/asynq/internal/errors" "github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/timeutil"
"golang.org/x/time/rate" "golang.org/x/time/rate"
) )
type processor struct { type processor struct {
logger *log.Logger logger *log.Logger
broker base.Broker broker base.Broker
clock timeutil.Clock
handler Handler handler Handler
baseCtxFn func() context.Context
queueConfig map[string]int queueConfig map[string]int
@@ -33,6 +38,7 @@ type processor struct {
	orderedQueues []string
	retryDelayFunc RetryDelayFunc
+	isFailureFunc func(error) bool
	errHandler ErrorHandler
@@ -69,7 +75,9 @@ type processor struct {
type processorParams struct {
	logger *log.Logger
	broker base.Broker
+	baseCtxFn func() context.Context
	retryDelayFunc RetryDelayFunc
+	isFailureFunc func(error) bool
	syncCh chan<- *syncRequest
	cancelations *base.Cancelations
	concurrency int
@@ -91,9 +99,12 @@ func newProcessor(params processorParams) *processor {
	return &processor{
		logger:         params.logger,
		broker:         params.broker,
+		baseCtxFn:      params.baseCtxFn,
+		clock:          timeutil.NewRealClock(),
		queueConfig:    queues,
		orderedQueues:  orderedQueues,
		retryDelayFunc: params.retryDelayFunc,
+		isFailureFunc:  params.isFailureFunc,
		syncRequestCh:  params.syncCh,
		cancelations:   params.cancelations,
		errLogLimiter:  rate.NewLimiter(rate.Every(3*time.Second), 1),
@@ -160,7 +171,7 @@ func (p *processor) exec() {
			return
		case p.sema <- struct{}{}: // acquire token
			qnames := p.queues()
-			msg, deadline, err := p.broker.Dequeue(qnames...)
+			msg, leaseExpirationTime, err := p.broker.Dequeue(qnames...)
			switch {
			case errors.Is(err, errors.ErrNoProcessableTask):
				p.logger.Debug("All queues are empty")
@@ -179,60 +190,77 @@ func (p *processor) exec() {
				return
			}

-			p.starting <- &workerInfo{msg, time.Now(), deadline}
+			lease := base.NewLease(leaseExpirationTime)
+			deadline := p.computeDeadline(msg)
+			p.starting <- &workerInfo{msg, time.Now(), deadline, lease}
			go func() {
				defer func() {
					p.finished <- msg
					<-p.sema // release token
				}()

-				ctx, cancel := createContext(msg, deadline)
-				p.cancelations.Add(msg.ID.String(), cancel)
+				ctx, cancel := asynqcontext.New(p.baseCtxFn(), msg, deadline)
+				p.cancelations.Add(msg.ID, cancel)
				defer func() {
					cancel()
-					p.cancelations.Delete(msg.ID.String())
+					p.cancelations.Delete(msg.ID)
				}()

				// check context before starting a worker goroutine.
				select {
				case <-ctx.Done():
					// already canceled (e.g. deadline exceeded).
-					p.retryOrKill(ctx, msg, ctx.Err())
+					p.handleFailedMessage(ctx, lease, msg, ctx.Err())
					return
				default:
				}

				resCh := make(chan error, 1)
				go func() {
-					resCh <- p.perform(ctx, NewTask(msg.Type, msg.Payload))
+					task := newTask(
+						msg.Type,
+						msg.Payload,
+						&ResultWriter{
+							id:     msg.ID,
+							qname:  msg.Queue,
+							broker: p.broker,
+							ctx:    ctx,
+						},
+					)
+					resCh <- p.perform(ctx, task)
				}()

				select {
				case <-p.abort:
					// time is up, push the message back to queue and quit this worker goroutine.
					p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
-					p.requeue(msg)
+					p.requeue(lease, msg)
					return
+				case <-lease.Done():
+					cancel()
+					p.handleFailedMessage(ctx, lease, msg, ErrLeaseExpired)
+					return
				case <-ctx.Done():
-					p.retryOrKill(ctx, msg, ctx.Err())
+					p.handleFailedMessage(ctx, lease, msg, ctx.Err())
					return
				case resErr := <-resCh:
+					// Note: One of three things should happen.
+					// 1) Done    -> Removes the message from Active
+					// 2) Retry   -> Removes the message from Active & Adds the message to Retry
+					// 3) Archive -> Removes the message from Active & Adds the message to archive
					if resErr != nil {
-						p.retryOrKill(ctx, msg, resErr)
+						p.handleFailedMessage(ctx, lease, msg, resErr)
						return
					}
-					p.markAsDone(ctx, msg)
+					p.handleSucceededMessage(lease, msg)
				}
			}()
		}
	}
}

-func (p *processor) requeue(msg *base.TaskMessage) {
-	err := p.broker.Requeue(msg)
+func (p *processor) requeue(l *base.Lease, msg *base.TaskMessage) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
+	err := p.broker.Requeue(ctx, msg)
	if err != nil {
		p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err)
	} else {
@@ -240,21 +268,51 @@ func (p *processor) requeue(msg *base.TaskMessage) {
	}
}
-func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
-	err := p.broker.Done(msg)
-	if err != nil {
-		errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
-		deadline, ok := ctx.Deadline()
-		if !ok {
-			panic("asynq: internal error: missing deadline in context")
-		}
-		p.logger.Warnf("%s; Will retry syncing", errMsg)
-		p.syncRequestCh <- &syncRequest{
-			fn: func() error {
-				return p.broker.Done(msg)
-			},
-			errMsg:   errMsg,
-			deadline: deadline,
-		}
-	}
-}
+func (p *processor) handleSucceededMessage(l *base.Lease, msg *base.TaskMessage) {
+	if msg.Retention > 0 {
+		p.markAsComplete(l, msg)
+	} else {
+		p.markAsDone(l, msg)
+	}
+}
+
+func (p *processor) markAsComplete(l *base.Lease, msg *base.TaskMessage) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
+	err := p.broker.MarkAsComplete(ctx, msg)
+	if err != nil {
+		errMsg := fmt.Sprintf("Could not move task id=%s type=%q from %q to %q: %+v",
+			msg.ID, msg.Type, base.ActiveKey(msg.Queue), base.CompletedKey(msg.Queue), err)
+		p.logger.Warnf("%s; Will retry syncing", errMsg)
+		p.syncRequestCh <- &syncRequest{
+			fn: func() error {
+				return p.broker.MarkAsComplete(ctx, msg)
+			},
+			errMsg:   errMsg,
+			deadline: l.Deadline(),
+		}
+	}
+}
+
+func (p *processor) markAsDone(l *base.Lease, msg *base.TaskMessage) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
+	err := p.broker.Done(ctx, msg)
+	if err != nil {
+		errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
+		p.logger.Warnf("%s; Will retry syncing", errMsg)
+		p.syncRequestCh <- &syncRequest{
+			fn: func() error {
+				return p.broker.Done(ctx, msg)
+			},
+			errMsg:   errMsg,
+			deadline: l.Deadline(),
+		}
+	}
+}
@@ -263,54 +321,61 @@ func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
// the task should not be retried and should be archived instead.
var SkipRetry = errors.New("skip retry for the task")

-func (p *processor) retryOrKill(ctx context.Context, msg *base.TaskMessage, err error) {
+func (p *processor) handleFailedMessage(ctx context.Context, l *base.Lease, msg *base.TaskMessage, err error) {
	if p.errHandler != nil {
		p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
	}
+	if !p.isFailureFunc(err) {
+		// retry the task without marking it as failed
+		p.retry(l, msg, err, false /*isFailure*/)
+		return
+	}
	if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) {
		p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
-		p.archive(ctx, msg, err)
+		p.archive(l, msg, err)
	} else {
-		p.retry(ctx, msg, err)
+		p.retry(l, msg, err, true /*isFailure*/)
	}
}

-func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error) {
+func (p *processor) retry(l *base.Lease, msg *base.TaskMessage, e error, isFailure bool) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
	d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
	retryAt := time.Now().Add(d)
-	err := p.broker.Retry(msg, retryAt, e.Error())
+	err := p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
	if err != nil {
		errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
-		deadline, ok := ctx.Deadline()
-		if !ok {
-			panic("asynq: internal error: missing deadline in context")
-		}
		p.logger.Warnf("%s; Will retry syncing", errMsg)
		p.syncRequestCh <- &syncRequest{
			fn: func() error {
-				return p.broker.Retry(msg, retryAt, e.Error())
+				return p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
			},
			errMsg:   errMsg,
-			deadline: deadline,
+			deadline: l.Deadline(),
		}
	}
}

-func (p *processor) archive(ctx context.Context, msg *base.TaskMessage, e error) {
-	err := p.broker.Archive(msg, e.Error())
+func (p *processor) archive(l *base.Lease, msg *base.TaskMessage, e error) {
+	if !l.IsValid() {
+		// If lease is not valid, do not write to redis; Let recoverer take care of it.
+		return
+	}
+	ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
+	err := p.broker.Archive(ctx, msg, e.Error())
	if err != nil {
		errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue))
-		deadline, ok := ctx.Deadline()
-		if !ok {
-			panic("asynq: internal error: missing deadline in context")
-		}
		p.logger.Warnf("%s; Will retry syncing", errMsg)
		p.syncRequestCh <- &syncRequest{
			fn: func() error {
-				return p.broker.Archive(msg, e.Error())
+				return p.broker.Archive(ctx, msg, e.Error())
			},
			errMsg:   errMsg,
-			deadline: deadline,
+			deadline: l.Deadline(),
		}
	}
}
@@ -440,3 +505,19 @@ func gcd(xs ...int) int {
	}
	return res
}
// computeDeadline returns the deadline for the given task, derived from its Timeout and Deadline fields.
func (p *processor) computeDeadline(msg *base.TaskMessage) time.Time {
if msg.Timeout == 0 && msg.Deadline == 0 {
p.logger.Errorf("asynq: internal error: both timeout and deadline are not set for the task message: %s", msg.ID)
return p.clock.Now().Add(defaultTimeout)
}
if msg.Timeout != 0 && msg.Deadline != 0 {
deadlineUnix := math.Min(float64(p.clock.Now().Unix()+msg.Timeout), float64(msg.Deadline))
return time.Unix(int64(deadlineUnix), 0)
}
if msg.Timeout != 0 {
return p.clock.Now().Add(time.Duration(msg.Timeout) * time.Second)
}
return time.Unix(msg.Deadline, 0)
}
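
To make the branching above concrete, a worked example (times shown as wall-clock for readability; all values are hypothetical):

	// Suppose now = 12:00:00.
	//   Timeout=30s, Deadline=12:01:00 -> deadline = 12:00:30 (the earlier bound, timeout, wins)
	//   Timeout=90s, Deadline=12:01:00 -> deadline = 12:01:00 (the absolute deadline wins)
	//   Timeout=30s, Deadline unset    -> deadline = 12:00:30
	//   Timeout unset, Deadline set    -> deadline = the Deadline value
	//   Neither set                    -> now + defaultTimeout, and an internal error is logged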


@@ -14,11 +14,21 @@ import (
"time" "time"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
h "github.com/hibiken/asynq/internal/asynqtest" "github.com/google/go-cmp/cmp/cmpopts"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
h "github.com/hibiken/asynq/internal/testutil"
"github.com/hibiken/asynq/internal/timeutil"
) )
var taskCmpOpts = []cmp.Option{
sortTaskOpt, // sort the tasks
cmp.AllowUnexported(Task{}), // allow typename, payload fields to be compared
cmpopts.IgnoreFields(Task{}, "opts", "w"), // ignore opts, w fields
}
// fakeHeartbeater receives from the starting and finished channels and does nothing.
func fakeHeartbeater(starting <-chan *workerInfo, finished <-chan *base.TaskMessage, done <-chan struct{}) {
	for {
@@ -42,6 +52,35 @@ func fakeSyncer(syncCh <-chan *syncRequest, done <-chan struct{}) {
	}
}
// newProcessorForTest returns a processor instance configured for testing purposes.
func newProcessorForTest(t *testing.T, r *rdb.RDB, h Handler) *processor {
starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
t.Cleanup(func() { close(done) })
go fakeHeartbeater(starting, finished, done)
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: r,
baseCtxFn: context.Background,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = h
return p
}
func TestProcessorSuccessWithSingleQueue(t *testing.T) {
	r := setup(t)
	defer r.Close()
@@ -87,45 +126,24 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
			processed = append(processed, task)
			return nil
		}
-		starting := make(chan *workerInfo)
-		finished := make(chan *base.TaskMessage)
-		syncCh := make(chan *syncRequest)
-		done := make(chan struct{})
-		defer func() { close(done) }()
-		go fakeHeartbeater(starting, finished, done)
-		go fakeSyncer(syncCh, done)
-		p := newProcessor(processorParams{
-			logger:          testLogger,
-			broker:          rdbClient,
-			retryDelayFunc:  DefaultRetryDelayFunc,
-			syncCh:          syncCh,
-			cancelations:    base.NewCancelations(),
-			concurrency:     10,
-			queues:          defaultQueueConfig,
-			strictPriority:  false,
-			errHandler:      nil,
-			shutdownTimeout: defaultShutdownTimeout,
-			starting:        starting,
-			finished:        finished,
-		})
-		p.handler = HandlerFunc(handler)
+		p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))

		p.start(&sync.WaitGroup{})
		for _, msg := range tc.incoming {
-			err := rdbClient.Enqueue(msg)
+			err := rdbClient.Enqueue(context.Background(), msg)
			if err != nil {
				p.shutdown()
				t.Fatal(err)
			}
		}
		time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
-		if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
+		if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
			t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
		}
		p.shutdown()

		mu.Lock()
-		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
+		if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
		}
		mu.Unlock()
@@ -179,46 +197,26 @@ func TestProcessorSuccessWithMultipleQueues(t *testing.T) {
			processed = append(processed, task)
			return nil
		}
-		starting := make(chan *workerInfo)
-		finished := make(chan *base.TaskMessage)
-		syncCh := make(chan *syncRequest)
-		done := make(chan struct{})
-		defer func() { close(done) }()
-		go fakeHeartbeater(starting, finished, done)
-		go fakeSyncer(syncCh, done)
-		p := newProcessor(processorParams{
-			logger:         testLogger,
-			broker:         rdbClient,
-			retryDelayFunc: DefaultRetryDelayFunc,
-			syncCh:         syncCh,
-			cancelations:   base.NewCancelations(),
-			concurrency:    10,
-			queues: map[string]int{
-				"default": 2,
-				"high":    3,
-				"low":     1,
-			},
-			strictPriority:  false,
-			errHandler:      nil,
-			shutdownTimeout: defaultShutdownTimeout,
-			starting:        starting,
-			finished:        finished,
-		})
-		p.handler = HandlerFunc(handler)
+		p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
+		p.queueConfig = map[string]int{
+			"default": 2,
+			"high":    3,
+			"low":     1,
+		}

		p.start(&sync.WaitGroup{})
		// Wait for two seconds to allow all pending tasks to be processed.
		time.Sleep(2 * time.Second)
		// Make sure no messages are stuck in active list.
		for _, qname := range tc.queues {
-			if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
+			if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
				t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
			}
		}
		p.shutdown()

		mu.Lock()
-		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
+		if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
		}
		mu.Unlock()
@@ -265,38 +263,17 @@ func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
			processed = append(processed, task)
			return nil
		}
-		starting := make(chan *workerInfo)
-		finished := make(chan *base.TaskMessage)
-		syncCh := make(chan *syncRequest)
-		done := make(chan struct{})
-		defer func() { close(done) }()
-		go fakeHeartbeater(starting, finished, done)
-		go fakeSyncer(syncCh, done)
-		p := newProcessor(processorParams{
-			logger:          testLogger,
-			broker:          rdbClient,
-			retryDelayFunc:  DefaultRetryDelayFunc,
-			syncCh:          syncCh,
-			cancelations:    base.NewCancelations(),
-			concurrency:     10,
-			queues:          defaultQueueConfig,
-			strictPriority:  false,
-			errHandler:      nil,
-			shutdownTimeout: defaultShutdownTimeout,
-			starting:        starting,
-			finished:        finished,
-		})
-		p.handler = HandlerFunc(handler)
+		p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))

		p.start(&sync.WaitGroup{})
		time.Sleep(2 * time.Second) // wait for two seconds to allow all pending tasks to be processed.
-		if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
+		if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
			t.Errorf("%q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), l)
		}
		p.shutdown()

		mu.Lock()
-		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
+		if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
		}
		mu.Unlock()
@@ -386,26 +363,9 @@ func TestProcessorRetry(t *testing.T) {
			defer mu.Unlock()
			n++
		}
-		starting := make(chan *workerInfo)
-		finished := make(chan *base.TaskMessage)
-		done := make(chan struct{})
-		defer func() { close(done) }()
-		go fakeHeartbeater(starting, finished, done)
-		p := newProcessor(processorParams{
-			logger:          testLogger,
-			broker:          rdbClient,
-			retryDelayFunc:  delayFunc,
-			syncCh:          nil,
-			cancelations:    base.NewCancelations(),
-			concurrency:     10,
-			queues:          defaultQueueConfig,
-			strictPriority:  false,
-			errHandler:      ErrorHandlerFunc(errHandler),
-			shutdownTimeout: defaultShutdownTimeout,
-			starting:        starting,
-			finished:        finished,
-		})
-		p.handler = tc.handler
+		p := newProcessorForTest(t, rdbClient, tc.handler)
+		p.errHandler = ErrorHandlerFunc(errHandler)
+		p.retryDelayFunc = delayFunc
		p.start(&sync.WaitGroup{})
		runTime := time.Now() // time when processor is running
@@ -439,7 +399,7 @@ func TestProcessorRetry(t *testing.T) {
t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff) t.Errorf("%s: mismatch found in %q after running processor; (-want, +got)\n%s", tc.desc, base.ArchivedKey(base.DefaultQueueName), diff)
} }
if l := r.LLen(base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 { if l := r.LLen(context.Background(), base.ActiveKey(base.DefaultQueueName)).Val(); l != 0 {
t.Errorf("%s: %q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), tc.desc, l) t.Errorf("%s: %q has %d tasks, want 0", base.ActiveKey(base.DefaultQueueName), tc.desc, l)
} }
@@ -449,6 +409,179 @@ func TestProcessorRetry(t *testing.T) {
	}
}
func TestProcessorMarkAsComplete(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
msg1 := h.NewTaskMessage("one", nil)
msg2 := h.NewTaskMessage("two", nil)
msg3 := h.NewTaskMessageWithQueue("three", nil, "custom")
msg1.Retention = 3600
msg3.Retention = 7200
handler := func(ctx context.Context, task *Task) error { return nil }
tests := []struct {
pending map[string][]*base.TaskMessage
completed map[string][]base.Z
queueCfg map[string]int
wantPending map[string][]*base.TaskMessage
wantCompleted func(completedAt time.Time) map[string][]base.Z
}{
{
pending: map[string][]*base.TaskMessage{
"default": {msg1, msg2},
"custom": {msg3},
},
completed: map[string][]base.Z{
"default": {},
"custom": {},
},
queueCfg: map[string]int{
"default": 1,
"custom": 1,
},
wantPending: map[string][]*base.TaskMessage{
"default": {},
"custom": {},
},
wantCompleted: func(completedAt time.Time) map[string][]base.Z {
return map[string][]base.Z{
"default": {{Message: h.TaskMessageWithCompletedAt(*msg1, completedAt), Score: completedAt.Unix() + msg1.Retention}},
"custom": {{Message: h.TaskMessageWithCompletedAt(*msg3, completedAt), Score: completedAt.Unix() + msg3.Retention}},
}
},
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedAllPendingQueues(t, r, tc.pending)
h.SeedAllCompletedQueues(t, r, tc.completed)
p := newProcessorForTest(t, rdbClient, HandlerFunc(handler))
p.queueConfig = tc.queueCfg
p.start(&sync.WaitGroup{})
runTime := time.Now() // time when processor is running
time.Sleep(2 * time.Second)
p.shutdown()
for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("diff found in %q pending set; want=%v, got=%v\n%s", qname, want, gotPending, diff)
}
}
for qname, want := range tc.wantCompleted(runTime) {
gotCompleted := h.GetCompletedEntries(t, r, qname)
if diff := cmp.Diff(want, gotCompleted, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("diff found in %q completed set; want=%v, got=%v\n%s", qname, want, gotCompleted, diff)
}
}
}
}
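An aside on what this test pins down: a completed task lands in the completed set with a score equal to its completion time plus its Retention, both in Unix seconds. A minimal sketch of that arithmetic, using a hypothetical helper name that is not part of asynq's API:

// completedScore sketches how the completed-set score asserted above is derived:
// the completion timestamp plus the task's retention period, in Unix seconds.
func completedScore(completedAt time.Time, retentionSec int64) int64 {
	return completedAt.Unix() + retentionSec
}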
// Test a scenario where the worker server cannot communicate with redis due to a network failure
// and the lease expires
func TestProcessorWithExpiredLease(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
m1 := h.NewTaskMessage("task1", nil)
tests := []struct {
pending []*base.TaskMessage
handler Handler
wantErrCount int
}{
{
pending: []*base.TaskMessage{m1},
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
// make sure the task processing time exceeds lease duration
// to test expired lease.
time.Sleep(rdb.LeaseDuration + 10*time.Second)
return nil
}),
wantErrCount: 1, // ErrorHandler should still be called with ErrLeaseExpired
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName)
starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
t.Cleanup(func() { close(done) })
// fake heartbeater which notifies lease expiration
go func() {
for {
select {
case w := <-starting:
// simulate expiration by resetting to some time in the past
w.lease.Reset(time.Now().Add(-5 * time.Second))
if !w.lease.NotifyExpiration() {
panic("Failed to notifiy lease expiration")
}
case <-finished:
// do nothing
case <-done:
return
}
}
}()
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
baseCtxFn: context.Background,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = tc.handler
var (
mu sync.Mutex // guards n and errs
n int // number of times error handler is called
errs []error // error passed to error handler
)
p.errHandler = ErrorHandlerFunc(func(ctx context.Context, t *Task, err error) {
mu.Lock()
defer mu.Unlock()
n++
errs = append(errs, err)
})
p.start(&sync.WaitGroup{})
time.Sleep(4 * time.Second)
p.shutdown()
if n != tc.wantErrCount {
t.Errorf("Unexpected number of error count: got %d, want %d", n, tc.wantErrCount)
continue
}
for i := 0; i < tc.wantErrCount; i++ {
if !errors.Is(errs[i], ErrLeaseExpired) {
t.Errorf("Unexpected error was passed to ErrorHandler: got %v want %v", errs[i], ErrLeaseExpired)
}
}
}
}
func TestProcessorQueues(t *testing.T) {
	sortOpt := cmp.Transformer("SortStrings", func(in []string) []string {
		out := append([]string(nil), in...) // Copy input to avoid mutating it
@@ -477,25 +610,10 @@ func TestProcessorQueues(t *testing.T) {
	}

	for _, tc := range tests {
-		starting := make(chan *workerInfo)
-		finished := make(chan *base.TaskMessage)
-		done := make(chan struct{})
-		defer func() { close(done) }()
-		go fakeHeartbeater(starting, finished, done)
-		p := newProcessor(processorParams{
-			logger:          testLogger,
-			broker:          nil,
-			retryDelayFunc:  DefaultRetryDelayFunc,
-			syncCh:          nil,
-			cancelations:    base.NewCancelations(),
-			concurrency:     10,
-			queues:          tc.queueCfg,
-			strictPriority:  false,
-			errHandler:      nil,
-			shutdownTimeout: defaultShutdownTimeout,
-			starting:        starting,
-			finished:        finished,
-		})
+		// Note: rdb and handler not needed for this test.
+		p := newProcessorForTest(t, nil, nil)
+		p.queueConfig = tc.queueCfg
		got := p.queues()
		if diff := cmp.Diff(tc.want, got, sortOpt); diff != "" {
			t.Errorf("with queue config: %v\n(*processor).queues() = %v, want %v\n(-want,+got):\n%s",
@@ -576,7 +694,9 @@ func TestProcessorWithStrictPriority(t *testing.T) {
		p := newProcessor(processorParams{
			logger:          testLogger,
			broker:          rdbClient,
+			baseCtxFn:       context.Background,
			retryDelayFunc:  DefaultRetryDelayFunc,
+			isFailureFunc:   defaultIsFailureFunc,
			syncCh:          syncCh,
			cancelations:    base.NewCancelations(),
			concurrency:     1, // Set concurrency to 1 to make sure tasks are processed one at a time.
@@ -593,13 +713,13 @@ func TestProcessorWithStrictPriority(t *testing.T) {
		time.Sleep(tc.wait)
		// Make sure no tasks are stuck in active list.
		for _, qname := range tc.queues {
-			if l := r.LLen(base.ActiveKey(qname)).Val(); l != 0 {
+			if l := r.LLen(context.Background(), base.ActiveKey(qname)).Val(); l != 0 {
				t.Errorf("%q has %d tasks, want 0", base.ActiveKey(qname), l)
			}
		}
		p.shutdown()
-		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Task{})); diff != "" {
+		if diff := cmp.Diff(tc.wantProcessed, processed, taskCmpOpts...); diff != "" {
			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
		}
@@ -638,12 +758,9 @@ func TestProcessorPerform(t *testing.T) {
			wantErr: true,
		},
	}

	// Note: We don't need to fully initialize the processor since we are only testing
	// perform method.
-	p := newProcessor(processorParams{
-		logger: testLogger,
-		queues: defaultQueueConfig,
-	})
+	p := newProcessorForTest(t, nil, nil)

	for _, tc := range tests {
		p.handler = tc.handler
@@ -738,3 +855,69 @@ func TestNormalizeQueues(t *testing.T) {
		}
	}
}
func TestProcessorComputeDeadline(t *testing.T) {
now := time.Now()
p := processor{
logger: log.NewLogger(nil),
clock: timeutil.NewSimulatedClock(now),
}
tests := []struct {
desc string
msg *base.TaskMessage
want time.Time
}{
{
desc: "message with only timeout specified",
msg: &base.TaskMessage{
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(30 * time.Minute),
},
{
desc: "message with only deadline specified",
msg: &base.TaskMessage{
Deadline: now.Add(24 * time.Hour).Unix(),
},
want: now.Add(24 * time.Hour),
},
{
desc: "message with both timeout and deadline set (now+timeout < deadline)",
msg: &base.TaskMessage{
Deadline: now.Add(24 * time.Hour).Unix(),
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(30 * time.Minute),
},
{
desc: "message with both timeout and deadline set (now+timeout > deadline)",
msg: &base.TaskMessage{
Deadline: now.Add(10 * time.Minute).Unix(),
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(10 * time.Minute),
},
{
desc: "message with both timeout and deadline set (now+timeout == deadline)",
msg: &base.TaskMessage{
Deadline: now.Add(30 * time.Minute).Unix(),
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(30 * time.Minute),
},
{
desc: "message without timeout and deadline",
msg: &base.TaskMessage{},
want: now.Add(defaultTimeout),
},
}
for _, tc := range tests {
got := p.computeDeadline(tc.msg)
		// Compare Unix timestamps with second granularity.
if got.Unix() != tc.want.Unix() {
t.Errorf("%s: got=%v, want=%v", tc.desc, got.Unix(), tc.want.Unix())
}
}
}
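Read together, the cases above pin the rule down: use now+Timeout when only a timeout is set, the Deadline when only a deadline is set, the earlier of the two when both are set, and now+defaultTimeout when neither is. A standalone sketch of that rule (not the actual implementation; names are illustrative):

// computeDeadlineSketch mirrors the behavior the test table above pins down.
func computeDeadlineSketch(now time.Time, timeoutSec, deadlineUnix int64, defaultTimeout time.Duration) time.Time {
	switch {
	case timeoutSec == 0 && deadlineUnix == 0:
		return now.Add(defaultTimeout) // neither set: fall back to the default timeout
	case timeoutSec == 0:
		return time.Unix(deadlineUnix, 0) // only a deadline is set
	case deadlineUnix == 0:
		return now.Add(time.Duration(timeoutSec) * time.Second) // only a timeout is set
	default:
		// Both set: the effective deadline is whichever comes first.
		byTimeout := now.Add(time.Duration(timeoutSec) * time.Second)
		byDeadline := time.Unix(deadlineUnix, 0)
		if byTimeout.Before(byDeadline) {
			return byTimeout
		}
		return byDeadline
	}
}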


@@ -5,11 +5,12 @@
package asynq

import (
-	"fmt"
+	"context"
	"sync"
	"time"

	"github.com/hibiken/asynq/internal/base"
+	"github.com/hibiken/asynq/internal/errors"
	"github.com/hibiken/asynq/internal/log"
)
@@ -17,6 +18,7 @@ type recoverer struct {
	logger         *log.Logger
	broker         base.Broker
	retryDelayFunc RetryDelayFunc
+	isFailureFunc  func(error) bool

	// channel to communicate back to the long running "recoverer" goroutine.
	done chan struct{}
@@ -34,6 +36,7 @@ type recovererParams struct {
	queues         []string
	interval       time.Duration
	retryDelayFunc RetryDelayFunc
+	isFailureFunc  func(error) bool
}

func newRecoverer(params recovererParams) *recoverer {
@@ -44,6 +47,7 @@ func newRecoverer(params recovererParams) *recoverer {
		queues:         params.queues,
		interval:       params.interval,
		retryDelayFunc: params.retryDelayFunc,
+		isFailureFunc:  params.isFailureFunc,
	}
}
@@ -73,34 +77,50 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
	}()
}

+// ErrLeaseExpired error indicates that the task failed because the worker working on the task
+// could not extend its lease due to missing heartbeats. The worker may have crashed or been cut off from the network.
+var ErrLeaseExpired = errors.New("asynq: task lease expired")

func (r *recoverer) recover() {
-	// Get all tasks which have expired 30 seconds ago or earlier.
-	deadline := time.Now().Add(-30 * time.Second)
-	msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...)
+	r.recoverLeaseExpiredTasks()
+	r.recoverStaleAggregationSets()
+}

+func (r *recoverer) recoverLeaseExpiredTasks() {
+	// Get all tasks which have expired 30 seconds ago or earlier to accommodate a certain amount of clock skew.
+	cutoff := time.Now().Add(-30 * time.Second)
+	msgs, err := r.broker.ListLeaseExpired(cutoff, r.queues...)
	if err != nil {
-		r.logger.Warn("recoverer: could not list deadline exceeded tasks")
+		r.logger.Warnf("recoverer: could not list lease expired tasks: %v", err)
		return
	}
-	const errMsg = "deadline exceeded"
	for _, msg := range msgs {
		if msg.Retried >= msg.Retry {
-			r.archive(msg, errMsg)
+			r.archive(msg, ErrLeaseExpired)
		} else {
-			r.retry(msg, errMsg)
+			r.retry(msg, ErrLeaseExpired)
		}
	}
}

+func (r *recoverer) recoverStaleAggregationSets() {
+	for _, qname := range r.queues {
+		if err := r.broker.ReclaimStaleAggregationSets(qname); err != nil {
+			r.logger.Warnf("recoverer: could not reclaim stale aggregation sets in queue %q: %v", qname, err)
+		}
+	}
+}

-func (r *recoverer) retry(msg *base.TaskMessage, errMsg string) {
-	delay := r.retryDelayFunc(msg.Retried, fmt.Errorf(errMsg), NewTask(msg.Type, msg.Payload))
-	retryAt := time.Now().Add(delay)
-	if err := r.broker.Retry(msg, retryAt, errMsg); err != nil {
-		r.logger.Warnf("recoverer: could not retry deadline exceeded task: %v", err)
+func (r *recoverer) retry(msg *base.TaskMessage, err error) {
+	delay := r.retryDelayFunc(msg.Retried, err, NewTask(msg.Type, msg.Payload))
+	retryAt := time.Now().Add(delay)
+	if err := r.broker.Retry(context.Background(), msg, retryAt, err.Error(), r.isFailureFunc(err)); err != nil {
+		r.logger.Warnf("recoverer: could not retry lease expired task: %v", err)
	}
}

-func (r *recoverer) archive(msg *base.TaskMessage, errMsg string) {
-	if err := r.broker.Archive(msg, errMsg); err != nil {
+func (r *recoverer) archive(msg *base.TaskMessage, err error) {
+	if err := r.broker.Archive(context.Background(), msg, err.Error()); err != nil {
		r.logger.Warnf("recoverer: could not move task to archive: %v", err)
	}
}
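Because ErrLeaseExpired is exported, application code can recognize lease-expiry failures in its ErrorHandler. A minimal sketch, assuming redisOpt is a RedisConnOpt already in scope:

srv := asynq.NewServer(redisOpt, asynq.Config{
	ErrorHandler: asynq.ErrorHandlerFunc(func(ctx context.Context, task *asynq.Task, err error) {
		if errors.Is(err, asynq.ErrLeaseExpired) {
			// The worker lost its lease: it may have crashed or been cut off from redis.
			log.Printf("lease expired while processing %q", task.Type())
		}
	}),
})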


@@ -10,9 +10,9 @@ import (
"time" "time"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
h "github.com/hibiken/asynq/internal/testutil"
) )
func TestRecoverer(t *testing.T) { func TestRecoverer(t *testing.T) {
@@ -27,29 +27,25 @@ func TestRecoverer(t *testing.T) {
	t4.Retried = t4.Retry // t4 has reached its max retry count

	now := time.Now()
-	oneHourFromNow := now.Add(1 * time.Hour)
-	fiveMinutesFromNow := now.Add(5 * time.Minute)
-	fiveMinutesAgo := now.Add(-5 * time.Minute)
-	oneHourAgo := now.Add(-1 * time.Hour)

	tests := []struct {
		desc         string
-		inProgress   map[string][]*base.TaskMessage
-		deadlines    map[string][]base.Z
+		active       map[string][]*base.TaskMessage
+		lease        map[string][]base.Z
		retry        map[string][]base.Z
		archived     map[string][]base.Z
		wantActive   map[string][]*base.TaskMessage
-		wantDeadlines map[string][]base.Z
+		wantLease    map[string][]base.Z
		wantRetry    map[string][]*base.TaskMessage
		wantArchived map[string][]*base.TaskMessage
	}{
		{
			desc: "with one active task",
-			inProgress: map[string][]*base.TaskMessage{
+			active: map[string][]*base.TaskMessage{
				"default": {t1},
			},
-			deadlines: map[string][]base.Z{
-				"default": {{Message: t1, Score: fiveMinutesAgo.Unix()}},
+			lease: map[string][]base.Z{
+				"default": {{Message: t1, Score: now.Add(-1 * time.Minute).Unix()}},
			},
			retry: map[string][]base.Z{
				"default": {},
@@ -60,7 +56,7 @@ func TestRecoverer(t *testing.T) {
			wantActive: map[string][]*base.TaskMessage{
				"default": {},
			},
-			wantDeadlines: map[string][]base.Z{
+			wantLease: map[string][]base.Z{
				"default": {},
			},
			wantRetry: map[string][]*base.TaskMessage{
@@ -72,12 +68,12 @@ func TestRecoverer(t *testing.T) {
		},
		{
			desc: "with a task with max-retry reached",
-			inProgress: map[string][]*base.TaskMessage{
+			active: map[string][]*base.TaskMessage{
				"default":  {t4},
				"critical": {},
			},
-			deadlines: map[string][]base.Z{
-				"default":  {{Message: t4, Score: fiveMinutesAgo.Unix()}},
+			lease: map[string][]base.Z{
+				"default":  {{Message: t4, Score: now.Add(-40 * time.Second).Unix()}},
				"critical": {},
			},
			retry: map[string][]base.Z{
@@ -92,7 +88,7 @@ func TestRecoverer(t *testing.T) {
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
@@ -107,17 +103,17 @@ func TestRecoverer(t *testing.T) {
		},
		{
			desc: "with multiple active tasks, and one expired",
-			inProgress: map[string][]*base.TaskMessage{
+			active: map[string][]*base.TaskMessage{
				"default":  {t1, t2},
				"critical": {t3},
			},
-			deadlines: map[string][]base.Z{
+			lease: map[string][]base.Z{
				"default": {
-					{Message: t1, Score: oneHourAgo.Unix()},
-					{Message: t2, Score: fiveMinutesFromNow.Unix()},
+					{Message: t1, Score: now.Add(-2 * time.Minute).Unix()},
+					{Message: t2, Score: now.Add(20 * time.Second).Unix()},
				},
				"critical": {
-					{Message: t3, Score: oneHourFromNow.Unix()},
+					{Message: t3, Score: now.Add(20 * time.Second).Unix()},
				},
			},
			retry: map[string][]base.Z{
@@ -132,9 +128,9 @@ func TestRecoverer(t *testing.T) {
"default": {t2}, "default": {t2},
"critical": {t3}, "critical": {t3},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {{Message: t2, Score: fiveMinutesFromNow.Unix()}}, "default": {{Message: t2, Score: now.Add(20 * time.Second).Unix()}},
"critical": {{Message: t3, Score: oneHourFromNow.Unix()}}, "critical": {{Message: t3, Score: now.Add(20 * time.Second).Unix()}},
}, },
wantRetry: map[string][]*base.TaskMessage{ wantRetry: map[string][]*base.TaskMessage{
"default": {t1}, "default": {t1},
@@ -147,17 +143,17 @@ func TestRecoverer(t *testing.T) {
		},
		{
			desc: "with multiple expired active tasks",
-			inProgress: map[string][]*base.TaskMessage{
+			active: map[string][]*base.TaskMessage{
				"default":  {t1, t2},
				"critical": {t3},
			},
-			deadlines: map[string][]base.Z{
+			lease: map[string][]base.Z{
				"default": {
-					{Message: t1, Score: oneHourAgo.Unix()},
-					{Message: t2, Score: oneHourFromNow.Unix()},
+					{Message: t1, Score: now.Add(-1 * time.Minute).Unix()},
+					{Message: t2, Score: now.Add(10 * time.Second).Unix()},
				},
				"critical": {
-					{Message: t3, Score: fiveMinutesAgo.Unix()},
+					{Message: t3, Score: now.Add(-1 * time.Minute).Unix()},
				},
			},
			retry: map[string][]base.Z{
@@ -172,8 +168,8 @@ func TestRecoverer(t *testing.T) {
"default": {t2}, "default": {t2},
"critical": {}, "critical": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {{Message: t2, Score: oneHourFromNow.Unix()}}, "default": {{Message: t2, Score: now.Add(10 * time.Second).Unix()}},
}, },
wantRetry: map[string][]*base.TaskMessage{ wantRetry: map[string][]*base.TaskMessage{
"default": {t1}, "default": {t1},
@@ -186,11 +182,11 @@ func TestRecoverer(t *testing.T) {
		},
		{
			desc: "with empty active queue",
-			inProgress: map[string][]*base.TaskMessage{
+			active: map[string][]*base.TaskMessage{
				"default":  {},
				"critical": {},
			},
-			deadlines: map[string][]base.Z{
+			lease: map[string][]base.Z{
				"default":  {},
				"critical": {},
			},
@@ -206,7 +202,7 @@ func TestRecoverer(t *testing.T) {
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
@@ -223,8 +219,8 @@ func TestRecoverer(t *testing.T) {
	for _, tc := range tests {
		h.FlushDB(t, r)
-		h.SeedAllActiveQueues(t, r, tc.inProgress)
-		h.SeedAllDeadlines(t, r, tc.deadlines)
+		h.SeedAllActiveQueues(t, r, tc.active)
+		h.SeedAllLease(t, r, tc.lease)
		h.SeedAllRetryQueues(t, r, tc.retry)
		h.SeedAllArchivedQueues(t, r, tc.archived)
@@ -234,6 +230,7 @@ func TestRecoverer(t *testing.T) {
			queues:         []string{"default", "critical"},
			interval:       1 * time.Second,
			retryDelayFunc: func(n int, err error, task *Task) time.Duration { return 30 * time.Second },
+			isFailureFunc:  defaultIsFailureFunc,
		})

		var wg sync.WaitGroup
@@ -248,10 +245,10 @@ func TestRecoverer(t *testing.T) {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff) t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff)
} }
} }
for qname, want := range tc.wantDeadlines { for qname, want := range tc.wantLease {
gotDeadlines := h.GetDeadlinesEntries(t, r, qname) gotLease := h.GetLeaseEntries(t, r, qname)
if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" { if diff := cmp.Diff(want, gotLease, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.DeadlinesKey(qname), diff) t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.LeaseKey(qname), diff)
} }
} }
cmpOpt := h.EquateInt64Approx(2) // allow up to two-second difference in `LastFailedAt` cmpOpt := h.EquateInt64Approx(2) // allow up to two-second difference in `LastFailedAt`
@@ -259,7 +256,7 @@ func TestRecoverer(t *testing.T) {
			gotRetry := h.GetRetryMessages(t, r, qname)
			var wantRetry []*base.TaskMessage // Note: construct message here since `LastFailedAt` is relative to each test run
			for _, msg := range msgs {
-				wantRetry = append(wantRetry, h.TaskMessageAfterRetry(*msg, "deadline exceeded", runTime))
+				wantRetry = append(wantRetry, h.TaskMessageAfterRetry(*msg, ErrLeaseExpired.Error(), runTime))
			}
			if diff := cmp.Diff(wantRetry, gotRetry, h.SortMsgOpt, cmpOpt); diff != "" {
				t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff)
@@ -269,7 +266,7 @@ func TestRecoverer(t *testing.T) {
			gotArchived := h.GetArchivedMessages(t, r, qname)
			var wantArchived []*base.TaskMessage
			for _, msg := range msgs {
-				wantArchived = append(wantArchived, h.TaskMessageWithError(*msg, "deadline exceeded", runTime))
+				wantArchived = append(wantArchived, h.TaskMessageWithError(*msg, ErrLeaseExpired.Error(), runTime))
			}
			if diff := cmp.Diff(wantArchived, gotArchived, h.SortMsgOpt, cmpOpt); diff != "" {
				t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff)


@@ -10,7 +10,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
@@ -19,9 +19,13 @@ import (
)

// A Scheduler kicks off tasks at regular intervals based on the user defined schedule.
+//
+// Schedulers are safe for concurrent use by multiple goroutines.
type Scheduler struct {
	id string
-	state *base.ServerState
+	state *serverState
	logger *log.Logger
	client *Client
	rdb    *rdb.RDB
@@ -30,6 +34,9 @@ type Scheduler struct {
	done chan struct{}
	wg   sync.WaitGroup
	errHandler func(task *Task, opts []Option, err error)

+	// guards idmap
+	mu sync.Mutex
	// idmap maps Scheduler's entry ID to cron.EntryID
	// to avoid using cron.EntryID as the public API of
	// the Scheduler.
@@ -61,7 +68,7 @@ func NewScheduler(r RedisConnOpt, opts *SchedulerOpts) *Scheduler {
return &Scheduler{ return &Scheduler{
id: generateSchedulerID(), id: generateSchedulerID(),
state: base.NewServerState(), state: &serverState{value: srvStateNew},
logger: logger, logger: logger,
client: NewClient(r), client: NewClient(r),
rdb: rdb.NewRDB(c), rdb: rdb.NewRDB(c),
@@ -154,17 +161,22 @@ func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entry
	if err != nil {
		return "", err
	}
+	s.mu.Lock()
	s.idmap[job.id.String()] = cronID
+	s.mu.Unlock()
	return job.id.String(), nil
}

// Unregister removes a registered entry by entry ID.
// Unregister returns a non-nil error if no entries were found for the given entryID.
func (s *Scheduler) Unregister(entryID string) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
	cronID, ok := s.idmap[entryID]
	if !ok {
		return fmt.Errorf("asynq: no scheduler entry found")
	}
+	delete(s.idmap, entryID)
	s.cron.Remove(cronID)
	return nil
}
@@ -183,23 +195,43 @@ func (s *Scheduler) Run() error {
// Start starts the scheduler.
// It returns an error if the scheduler is already running or has been shutdown.
func (s *Scheduler) Start() error {
-	switch s.state.Get() {
-	case base.StateActive:
-		return fmt.Errorf("asynq: the scheduler is already running")
-	case base.StateClosed:
-		return fmt.Errorf("asynq: the scheduler has already been stopped")
+	if err := s.start(); err != nil {
+		return err
	}
	s.logger.Info("Scheduler starting")
	s.logger.Infof("Scheduler timezone is set to %v", s.location)
	s.cron.Start()
	s.wg.Add(1)
	go s.runHeartbeater()
-	s.state.Set(base.StateActive)
	return nil
}

+// Checks server state and returns an error if pre-condition is not met.
+// Otherwise it sets the server state to active.
+func (s *Scheduler) start() error {
+	s.state.mu.Lock()
+	defer s.state.mu.Unlock()
+	switch s.state.value {
+	case srvStateActive:
+		return fmt.Errorf("asynq: the scheduler is already running")
+	case srvStateClosed:
+		return fmt.Errorf("asynq: the scheduler has already been stopped")
+	}
+	s.state.value = srvStateActive
+	return nil
+}

// Shutdown stops and shuts down the scheduler.
func (s *Scheduler) Shutdown() {
+	s.state.mu.Lock()
+	if s.state.value == srvStateNew || s.state.value == srvStateClosed {
+		// scheduler is not running, do nothing and return.
+		s.state.mu.Unlock()
+		return
+	}
+	s.state.value = srvStateClosed
+	s.state.mu.Unlock()

	s.logger.Info("Scheduler shutting down")
	close(s.done) // signal heartbeater to stop
	ctx := s.cron.Stop()
@@ -209,7 +241,6 @@ func (s *Scheduler) Shutdown() {
	s.clearHistory()
	s.client.Close()
	s.rdb.Close()
-	s.state.Set(base.StateClosed)
	s.logger.Info("Scheduler stopped")
}


@@ -10,8 +10,8 @@ import (
"time" "time"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/testutil"
) )
func TestSchedulerRegister(t *testing.T) { func TestSchedulerRegister(t *testing.T) {
@@ -69,8 +69,8 @@ func TestSchedulerRegister(t *testing.T) {
		time.Sleep(tc.wait)
		scheduler.Shutdown()

-		got := asynqtest.GetPendingMessages(t, r, tc.queue)
-		if diff := cmp.Diff(tc.want, got, asynqtest.IgnoreIDOpt); diff != "" {
+		got := testutil.GetPendingMessages(t, r, tc.queue)
+		if diff := cmp.Diff(tc.want, got, testutil.IgnoreIDOpt); diff != "" {
			t.Errorf("mismatch found in queue %q: (-want,+got)\n%s", tc.queue, diff)
		}
	}
@@ -148,7 +148,7 @@ func TestSchedulerUnregister(t *testing.T) {
		time.Sleep(tc.wait)
		scheduler.Shutdown()

-		got := asynqtest.GetPendingMessages(t, r, tc.queue)
+		got := testutil.GetPendingMessages(t, r, tc.queue)
		if len(got) != 0 {
			t.Errorf("%d tasks were enqueued, want zero", len(got))
		}


@@ -98,7 +98,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
	mux.mu.Lock()
	defer mux.mu.Unlock()

-	if pattern == "" {
+	if strings.TrimSpace(pattern) == "" {
		panic("asynq: invalid pattern")
	}
	if handler == nil {
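With this change a whitespace-only pattern is rejected the same way an empty one always was. A quick sketch of the behavior, assuming handler is a Handler in scope:

mux := asynq.NewServeMux()
mux.Handle("email:send", handler) // ok
// mux.Handle("   ", handler)     // would now panic: "asynq: invalid pattern"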

server.go

@@ -15,7 +15,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
@@ -38,7 +38,7 @@ type Server struct {
	broker base.Broker

-	state *base.ServerState
+	state *serverState

	// wait group to wait for all goroutines to finish.
	wg sync.WaitGroup
@@ -49,6 +49,45 @@ type Server struct {
	subscriber    *subscriber
	recoverer     *recoverer
	healthchecker *healthchecker
janitor *janitor
aggregator *aggregator
}
type serverState struct {
mu sync.Mutex
value serverStateValue
}
type serverStateValue int
const (
	// StateNew represents a new server. Server begins in
	// this state and then transitions to StateActive when
	// Start or Run is called.
srvStateNew serverStateValue = iota
// StateActive indicates the server is up and active.
srvStateActive
// StateStopped indicates the server is up but no longer processing new tasks.
srvStateStopped
// StateClosed indicates the server has been shutdown.
srvStateClosed
)
var serverStates = []string{
"new",
"active",
"stopped",
"closed",
}
func (s serverStateValue) String() string {
if srvStateNew <= s && s <= srvStateClosed {
return serverStates[s]
}
return "unknown status"
}
// Config specifies the server's background-task processing behavior.
@@ -56,14 +95,28 @@ type Config struct {
	// Maximum number of concurrent processing of tasks.
	//
	// If set to a zero or negative value, NewServer will overwrite the value
-	// to the number of CPUs usable by the currennt process.
+	// to the number of CPUs usable by the current process.
	Concurrency int
// BaseContext optionally specifies a function that returns the base context for Handler invocations on this server.
//
// If BaseContext is nil, the default is context.Background().
// If this is defined, then it MUST return a non-nil context
BaseContext func() context.Context
	// Function to calculate retry delay for a failed task.
	//
	// By default, it uses exponential backoff algorithm to calculate the delay.
	RetryDelayFunc RetryDelayFunc
// Predicate function to determine whether the error returned from Handler is a failure.
// If the function returns false, Server will not increment the retried counter for the task,
// and Server won't record the queue stats (processed and failed stats) to avoid skewing the error
// rate of the queue.
//
// By default, if the given error is non-nil the function returns true.
IsFailure func(error) bool
	// List of queues to process with given priority value. Keys are the names of the
	// queues and values are associated priority value.
	//
@@ -135,6 +188,58 @@ type Config struct {
	//
	// If unset or zero, the interval is set to 15 seconds.
	HealthCheckInterval time.Duration
// DelayedTaskCheckInterval specifies the interval between checks run on 'scheduled' and 'retry'
// tasks, and forwarding them to 'pending' state if they are ready to be processed.
//
// If unset or zero, the interval is set to 5 seconds.
DelayedTaskCheckInterval time.Duration
// GroupGracePeriod specifies the amount of time the server will wait for an incoming task before aggregating
// the tasks in a group. If an incoming task is received within this period, the server will wait for another
// period of the same length, up to GroupMaxDelay if specified.
//
// If unset or zero, the grace period is set to 1 minute.
	// Minimum duration for GroupGracePeriod is 1 second. If the value specified is less than a second, the call to
// NewServer will panic.
GroupGracePeriod time.Duration
// GroupMaxDelay specifies the maximum amount of time the server will wait for incoming tasks before aggregating
// the tasks in a group.
//
// If unset or zero, no delay limit is used.
GroupMaxDelay time.Duration
// GroupMaxSize specifies the maximum number of tasks that can be aggregated into a single task within a group.
// If GroupMaxSize is reached, the server will aggregate the tasks into one immediately.
//
// If unset or zero, no size limit is used.
GroupMaxSize int
// GroupAggregator specifies the aggregation function used to aggregate multiple tasks in a group into one task.
//
// If unset or nil, the group aggregation feature will be disabled on the server.
GroupAggregator GroupAggregator
}
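A hedged sketch of how these new Config knobs might be wired together (values are illustrative; aggregate refers to the GroupAggregatorFunc sketch shown after the interface below, and redisOpt is assumed in scope):

srv := asynq.NewServer(redisOpt, asynq.Config{
	IsFailure: func(err error) bool {
		// Don't count context cancellation against the queue's error rate.
		return err != nil && !errors.Is(err, context.Canceled)
	},
	GroupGracePeriod: 2 * time.Minute,  // quiet period before a group is aggregated
	GroupMaxDelay:    10 * time.Minute, // upper bound on how long a group may wait
	GroupMaxSize:     100,              // aggregate immediately once 100 tasks are in the group
	GroupAggregator:  aggregate,
})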
// GroupAggregator aggregates a group of tasks into one before the tasks are passed to the Handler.
type GroupAggregator interface {
// Aggregate aggregates the given tasks in a group with the given group name,
// and returns a new task which is the aggregation of those tasks.
//
// Use NewTask(typename, payload, opts...) to set any options for the aggregated task.
// The Queue option, if provided, will be ignored and the aggregated task will always be enqueued
	// to the same queue the group belonged to.
Aggregate(group string, tasks []*Task) *Task
}
// The GroupAggregatorFunc type is an adapter to allow the use of ordinary functions as a GroupAggregator.
// If f is a function with the appropriate signature, GroupAggregatorFunc(f) is a GroupAggregator that calls f.
type GroupAggregatorFunc func(group string, tasks []*Task) *Task
// Aggregate calls fn(group, tasks)
func (fn GroupAggregatorFunc) Aggregate(group string, tasks []*Task) *Task {
return fn(group, tasks)
} }
// An ErrorHandler handles an error that occurred during task processing.
@@ -268,6 +373,8 @@ func DefaultRetryDelayFunc(n int, e error, t *Task) time.Duration {
	return time.Duration(s) * time.Second
}

+func defaultIsFailureFunc(err error) bool { return err != nil }

var defaultQueueConfig = map[string]int{
	base.DefaultQueueName: 1,
}
@@ -276,6 +383,10 @@ const (
	defaultShutdownTimeout = 8 * time.Second

	defaultHealthCheckInterval = 15 * time.Second

+	defaultDelayedTaskCheckInterval = 5 * time.Second

+	defaultGroupGracePeriod = 1 * time.Minute
)
// NewServer returns a new Server given a redis connection option
@@ -285,6 +396,10 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
	if !ok {
		panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
	}
+	baseCtxFn := cfg.BaseContext
+	if baseCtxFn == nil {
+		baseCtxFn = context.Background
+	}
	n := cfg.Concurrency
	if n < 1 {
		n = runtime.NumCPU()
@@ -293,8 +408,15 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
	if delayFunc == nil {
		delayFunc = DefaultRetryDelayFunc
	}
+	isFailureFunc := cfg.IsFailure
+	if isFailureFunc == nil {
+		isFailureFunc = defaultIsFailureFunc
+	}
	queues := make(map[string]int)
	for qname, p := range cfg.Queues {
+		if err := base.ValidateQueueName(qname); err != nil {
+			continue // ignore invalid queue names
+		}
		if p > 0 {
			queues[qname] = p
		}
@@ -314,6 +436,14 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
	if healthcheckInterval == 0 {
		healthcheckInterval = defaultHealthCheckInterval
	}
+	// TODO: Create a helper to check for zero value and fall back to default (e.g. getDurationOrDefault())
+	groupGracePeriod := cfg.GroupGracePeriod
+	if groupGracePeriod == 0 {
+		groupGracePeriod = defaultGroupGracePeriod
+	}
+	if groupGracePeriod < time.Second {
+		panic("GroupGracePeriod cannot be less than a second")
+	}
	logger := log.NewLogger(cfg.Logger)
	loglevel := cfg.LogLevel
	if loglevel == level_unspecified {
@@ -325,7 +455,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
	starting := make(chan *workerInfo)
	finished := make(chan *base.TaskMessage)
	syncCh := make(chan *syncRequest)
-	state := base.NewServerState()
+	srvState := &serverState{value: srvStateNew}
	cancels := base.NewCancelations()

	syncer := newSyncer(syncerParams{
@@ -340,15 +470,19 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
		concurrency:    n,
		queues:         queues,
		strictPriority: cfg.StrictPriority,
-		state:          state,
+		state:          srvState,
		starting:       starting,
		finished:       finished,
	})
+	delayedTaskCheckInterval := cfg.DelayedTaskCheckInterval
+	if delayedTaskCheckInterval == 0 {
+		delayedTaskCheckInterval = defaultDelayedTaskCheckInterval
+	}
	forwarder := newForwarder(forwarderParams{
		logger: logger,
		broker: rdb,
		queues: qnames,
-		interval: 5 * time.Second,
+		interval: delayedTaskCheckInterval,
	})
	subscriber := newSubscriber(subscriberParams{
		logger: logger,
@@ -359,6 +493,8 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
		logger:         logger,
		broker:         rdb,
		retryDelayFunc: delayFunc,
+		baseCtxFn:      context.Background,
+		isFailureFunc:  isFailureFunc,
		syncCh:         syncCh,
		cancelations:   cancels,
		concurrency:    n,
@@ -373,6 +509,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
		logger:         logger,
		broker:         rdb,
		retryDelayFunc: delayFunc,
+		isFailureFunc:  isFailureFunc,
		queues:         qnames,
		interval:       1 * time.Minute,
	})
@@ -382,10 +519,25 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
		interval:        healthcheckInterval,
		healthcheckFunc: cfg.HealthCheckFunc,
	})
janitor := newJanitor(janitorParams{
logger: logger,
broker: rdb,
queues: qnames,
interval: 8 * time.Second,
})
aggregator := newAggregator(aggregatorParams{
logger: logger,
broker: rdb,
queues: qnames,
gracePeriod: groupGracePeriod,
maxDelay: cfg.GroupMaxDelay,
maxSize: cfg.GroupMaxSize,
groupAggregator: cfg.GroupAggregator,
})
return &Server{ return &Server{
logger: logger, logger: logger,
broker: rdb, broker: rdb,
state: state, state: srvState,
forwarder: forwarder, forwarder: forwarder,
processor: processor, processor: processor,
syncer: syncer, syncer: syncer,
@@ -393,6 +545,8 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
		subscriber:    subscriber,
		recoverer:     recoverer,
		healthchecker: healthchecker,
janitor: janitor,
aggregator: aggregator,
	}
} }
@@ -454,17 +608,11 @@ func (srv *Server) Start(handler Handler) error {
	if handler == nil {
		return fmt.Errorf("asynq: server cannot run with nil handler")
	}
-	switch srv.state.Get() {
-	case base.StateActive:
-		return fmt.Errorf("asynq: the server is already running")
-	case base.StateStopped:
-		return fmt.Errorf("asynq: the server is in the stopped state. Waiting for shutdown.")
-	case base.StateClosed:
-		return ErrServerClosed
-	}
-	srv.state.Set(base.StateActive)
	srv.processor.handler = handler
+	if err := srv.start(); err != nil {
+		return err
+	}

	srv.logger.Info("Starting processing")

	srv.heartbeater.start(&srv.wg)
@@ -474,6 +622,25 @@ func (srv *Server) Start(handler Handler) error {
	srv.recoverer.start(&srv.wg)
	srv.forwarder.start(&srv.wg)
	srv.processor.start(&srv.wg)
srv.janitor.start(&srv.wg)
srv.aggregator.start(&srv.wg)
return nil
}
// Checks server state and returns an error if pre-condition is not met.
// Otherwise it sets the server state to active.
func (srv *Server) start() error {
srv.state.mu.Lock()
defer srv.state.mu.Unlock()
switch srv.state.value {
case srvStateActive:
return fmt.Errorf("asynq: the server is already running")
case srvStateStopped:
return fmt.Errorf("asynq: the server is in the stopped state. Waiting for shutdown.")
case srvStateClosed:
return ErrServerClosed
}
srv.state.value = srvStateActive
	return nil
}
@@ -482,11 +649,14 @@ func (srv *Server) Start(handler Handler) error {
// active workers to finish processing tasks for duration specified in Config.ShutdownTimeout.
// If worker didn't finish processing a task during the timeout, the task will be pushed back to Redis.
func (srv *Server) Shutdown() {
-	switch srv.state.Get() {
-	case base.StateNew, base.StateClosed:
+	srv.state.mu.Lock()
+	if srv.state.value == srvStateNew || srv.state.value == srvStateClosed {
+		srv.state.mu.Unlock()
		// server is not running, do nothing and return.
		return
	}
+	srv.state.value = srvStateClosed
+	srv.state.mu.Unlock()

	srv.logger.Info("Starting graceful shutdown")

	// Note: The order of shutdown is important.
@@ -498,14 +668,13 @@ func (srv *Server) Shutdown() {
	srv.recoverer.shutdown()
	srv.syncer.shutdown()
	srv.subscriber.shutdown()
+	srv.janitor.shutdown()
+	srv.aggregator.shutdown()
	srv.healthchecker.shutdown()
	srv.heartbeater.shutdown()
	srv.wg.Wait()

	srv.broker.Close()
-	srv.state.Set(base.StateClosed)

	srv.logger.Info("Exiting")
}
@@ -515,8 +684,16 @@ func (srv *Server) Shutdown() {
//
// Stop does not shutdown the server, make sure to call Shutdown before exit.
func (srv *Server) Stop() {
+	srv.state.mu.Lock()
+	if srv.state.value != srvStateActive {
+		// Invalid call to Stop, server can only go from Active state to Stopped state.
+		srv.state.mu.Unlock()
+		return
+	}
+	srv.state.value = srvStateStopped
+	srv.state.mu.Unlock()

	srv.logger.Info("Stopping processor")
	srv.processor.stop()
-	srv.state.Set(base.StateStopped)
	srv.logger.Info("Processor stopped")
}
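Taken together, start, Stop, and Shutdown implement a small lifecycle: new to active to stopped to closed, where Stop is only valid from the active state and Shutdown is a no-op from new or closed. A usage sketch, assuming redisOpt and a mux handler are in scope:

srv := asynq.NewServer(redisOpt, asynq.Config{Concurrency: 4})
if err := srv.Start(mux); err != nil { // new -> active
	log.Fatal(err)
}
srv.Stop()     // active -> stopped: stop pulling new tasks
srv.Shutdown() // stopped (or active) -> closed: wait for in-flight tasks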


@@ -11,15 +11,15 @@ import (
"testing" "testing"
"time" "time"
"github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
"github.com/hibiken/asynq/internal/testbroker" "github.com/hibiken/asynq/internal/testbroker"
"github.com/hibiken/asynq/internal/testutil"
"go.uber.org/goleak" "go.uber.org/goleak"
) )
func TestServer(t *testing.T) { func TestServer(t *testing.T) {
// https://github.com/go-redis/redis/issues/1029 // https://github.com/go-redis/redis/issues/1029
ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper") ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
defer goleak.VerifyNoLeaks(t, ignoreOpt) defer goleak.VerifyNoLeaks(t, ignoreOpt)
redisConnOpt := getRedisConnOpt(t) redisConnOpt := getRedisConnOpt(t)
@@ -40,12 +40,12 @@ func TestServer(t *testing.T) {
		t.Fatal(err)
	}

-	_, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 123})))
+	_, err = c.Enqueue(NewTask("send_email", testutil.JSON(map[string]interface{}{"recipient_id": 123})))
	if err != nil {
		t.Errorf("could not enqueue a task: %v", err)
	}

-	_, err = c.Enqueue(NewTask("send_email", asynqtest.JSON(map[string]interface{}{"recipient_id": 456})), ProcessIn(1*time.Hour))
+	_, err = c.Enqueue(NewTask("send_email", testutil.JSON(map[string]interface{}{"recipient_id": 456})), ProcessIn(1*time.Hour))
	if err != nil {
		t.Errorf("could not enqueue a task: %v", err)
	}
@@ -55,7 +55,7 @@ func TestServer(t *testing.T) {
func TestServerRun(t *testing.T) {
	// https://github.com/go-redis/redis/issues/1029
-	ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper")
+	ignoreOpt := goleak.IgnoreTopFunction("github.com/go-redis/redis/v8/internal/pool.(*ConnPool).reaper")
	defer goleak.VerifyNoLeaks(t, ignoreOpt)

	srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})


@@ -8,7 +8,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/go-redis/redis/v7" "github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
) )


@@ -5,14 +5,15 @@
package asynq

import (
+	"context"
	"fmt"
	"sync"
	"testing"
	"time"

-	h "github.com/hibiken/asynq/internal/asynqtest"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
+	h "github.com/hibiken/asynq/internal/testutil"
)

func TestSyncer(t *testing.T) {
@@ -41,7 +42,7 @@ func TestSyncer(t *testing.T) {
		m := msg
		syncRequestCh <- &syncRequest{
			fn: func() error {
-				return rdbClient.Done(m)
+				return rdbClient.Done(context.Background(), m)
			},
			deadline: time.Now().Add(5 * time.Minute),
		}


@@ -63,7 +63,7 @@ func cronList(cmd *cobra.Command, args []string) {
	cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
	printRows := func(w io.Writer, tmpl string) {
		for _, e := range entries {
-			fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), formatPayload(e.Task.Payload()), e.Opts,
+			fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), sprintBytes(e.Task.Payload()), e.Opts,
				nextEnqueue(e.Next), prevEnqueue(e.Prev))
		}
	}

tools/asynq/cmd/group.go (new file)

@@ -0,0 +1,48 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(groupCmd)
groupCmd.AddCommand(groupListCmd)
groupListCmd.Flags().StringP("queue", "q", "", "queue to inspect")
groupListCmd.MarkFlagRequired("queue")
}
var groupCmd = &cobra.Command{
Use: "group",
Short: "Manage groups",
}
var groupListCmd = &cobra.Command{
Use: "ls",
Short: "List groups",
Args: cobra.NoArgs,
Run: groupLists,
}
func groupLists(cmd *cobra.Command, args []string) {
qname, err := cmd.Flags().GetString("queue")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
inspector := createInspector()
	groups, err := inspector.Groups(qname)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(groups) == 0 {
fmt.Printf("No groups found in queue %q\n", qname)
return
}
for _, g := range groups {
fmt.Println(g.Group)
}
}
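The subcommand above is a thin wrapper over the inspector API added in this release; the equivalent programmatic call would look roughly like this sketch (assuming redisOpt is a RedisConnOpt in scope):

inspector := asynq.NewInspector(redisOpt)
groups, err := inspector.Groups("default")
if err != nil {
	log.Fatal(err)
}
for _, g := range groups {
	fmt.Println(g.Group)
}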


@@ -1,404 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package cmd
import (
"encoding/json"
"fmt"
"os"
"strings"
"time"
"github.com/go-redis/redis/v7"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/rdb"
"github.com/spf13/cobra"
)
// migrateCmd represents the migrate command.
var migrateCmd = &cobra.Command{
Use: "migrate",
Short: fmt.Sprintf("Migrate existing tasks and queues to be asynq%s compatible", base.Version),
Long: `Migrate (asynq migrate) will migrate existing tasks and queues in redis to be compatible with the latest version of asynq.
`,
Args: cobra.NoArgs,
Run: migrate,
}
func init() {
rootCmd.AddCommand(migrateCmd)
}
func backupKey(key string) string {
return fmt.Sprintf("%s:backup", key)
}
func renameKeyAsBackup(c redis.UniversalClient, key string) error {
if c.Exists(key).Val() == 0 {
return nil // key doesn't exist; no-op
}
return c.Rename(key, backupKey(key)).Err()
}
func failIfError(err error, msg string) {
if err != nil {
fmt.Printf("error: %s: %v\n", msg, err)
fmt.Println("*** Please report this issue at https://github.com/hibiken/asynq/issues ***")
os.Exit(1)
}
}
func logIfError(err error, msg string) {
if err != nil {
fmt.Printf("warning: %s: %v\n", msg, err)
}
}
func migrate(cmd *cobra.Command, args []string) {
r := createRDB()
queues, err := r.AllQueues()
failIfError(err, "Failed to get queue names")
// ---------------------------------------------
// Pre-check: Ensure no active servers, tasks.
// ---------------------------------------------
srvs, err := r.ListServers()
failIfError(err, "Failed to get server infos")
if len(srvs) > 0 {
fmt.Println("(error): Server(s) still running. Please ensure that no asynq servers are running when runnning migrate command.")
os.Exit(1)
}
for _, qname := range queues {
stats, err := r.CurrentStats(qname)
failIfError(err, "Failed to get stats")
if stats.Active > 0 {
fmt.Printf("(error): %d active tasks found. Please ensure that no active tasks exist when running migrate command.\n", stats.Active)
os.Exit(1)
}
}
// ---------------------------------------------
// Rename pending key
// ---------------------------------------------
fmt.Print("Renaming pending keys...")
for _, qname := range queues {
oldKey := fmt.Sprintf("asynq:{%s}", qname)
if r.Client().Exists(oldKey).Val() == 0 {
continue
}
newKey := base.PendingKey(qname)
err := r.Client().Rename(oldKey, newKey).Err()
failIfError(err, "Failed to rename key")
}
fmt.Print("Done\n")
// ---------------------------------------------
// Rename keys as backup
// ---------------------------------------------
fmt.Print("Renaming keys for backup...")
for _, qname := range queues {
keys := []string{
base.ActiveKey(qname),
base.PendingKey(qname),
base.ScheduledKey(qname),
base.RetryKey(qname),
base.ArchivedKey(qname),
}
for _, key := range keys {
err := renameKeyAsBackup(r.Client(), key)
failIfError(err, fmt.Sprintf("Failed to rename key %q for backup", key))
}
}
fmt.Print("Done\n")
// ---------------------------------------------
// Update to new schema
// ---------------------------------------------
fmt.Print("Updating to new schema...")
for _, qname := range queues {
updatePendingMessages(r, qname)
updateZSetMessages(r.Client(), base.ScheduledKey(qname), "scheduled")
updateZSetMessages(r.Client(), base.RetryKey(qname), "retry")
updateZSetMessages(r.Client(), base.ArchivedKey(qname), "archived")
}
fmt.Print("Done\n")
// ---------------------------------------------
// Delete backup keys
// ---------------------------------------------
fmt.Print("Deleting backup keys...")
for _, qname := range queues {
keys := []string{
backupKey(base.ActiveKey(qname)),
backupKey(base.PendingKey(qname)),
backupKey(base.ScheduledKey(qname)),
backupKey(base.RetryKey(qname)),
backupKey(base.ArchivedKey(qname)),
}
for _, key := range keys {
err := r.Client().Del(key).Err()
failIfError(err, "Failed to delete backup key")
}
}
fmt.Print("Done\n")
}
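// Illustrative session (a sketch; the output shape follows the Print calls
// above, the queue contents are hypothetical):
//
//	$ asynq migrate
//	Renaming pending keys...Done
//	Renaming keys for backup...Done
//	Updating to new schema...Done
//	Deleting backup keys...Done
// UnmarshalOldMessage decodes a message stored in the v0.17 format and
// converts it to the current base.TaskMessage representation.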
func UnmarshalOldMessage(encoded string) (*base.TaskMessage, error) {
oldMsg, err := DecodeMessage(encoded)
if err != nil {
return nil, err
}
payload, err := json.Marshal(oldMsg.Payload)
if err != nil {
return nil, fmt.Errorf("could not marshal payload: %v", err)
}
return &base.TaskMessage{
Type: oldMsg.Type,
Payload: payload,
ID: oldMsg.ID,
Queue: oldMsg.Queue,
Retry: oldMsg.Retry,
Retried: oldMsg.Retried,
ErrorMsg: oldMsg.ErrorMsg,
LastFailedAt: 0,
Timeout: oldMsg.Timeout,
Deadline: oldMsg.Deadline,
UniqueKey: oldMsg.UniqueKey,
}, nil
}
// OldTaskMessage is the TaskMessage type from v0.17.
type OldTaskMessage struct {
// Type indicates the kind of the task to be performed.
Type string
// Payload holds data needed to process the task.
Payload map[string]interface{}
// ID is a unique identifier for each task.
ID uuid.UUID
// Queue is a name this message should be enqueued to.
Queue string
// Retry is the max number of retry for this task.
Retry int
// Retried is the number of times we've retried this task so far.
Retried int
// ErrorMsg holds the error message from the last failure.
ErrorMsg string
// Timeout specifies timeout in seconds.
// If task processing doesn't complete within the timeout, the task will be retried
// if retry count is remaining. Otherwise it will be moved to the archive.
//
// Use zero to indicate no timeout.
Timeout int64
// Deadline specifies the deadline for the task in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// If task processing doesn't complete before the deadline, the task will be retried
// if retry count is remaining. Otherwise it will be moved to the archive.
//
// Use zero to indicate no deadline.
Deadline int64
// UniqueKey holds the redis key used for uniqueness lock for this task.
//
// Empty string indicates that no uniqueness lock was used.
UniqueKey string
}
// DecodeMessage unmarshals the given encoded string and returns a decoded task message.
// Code from v0.17.
func DecodeMessage(s string) (*OldTaskMessage, error) {
d := json.NewDecoder(strings.NewReader(s))
d.UseNumber()
var msg OldTaskMessage
if err := d.Decode(&msg); err != nil {
return nil, err
}
return &msg, nil
}
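// Sketch of how the two helpers above cooperate during migration, using a
// hypothetical encoded message. A v0.17 message stores Payload as a JSON
// object; UnmarshalOldMessage re-encodes it as raw JSON bytes for the new
// TaskMessage format.
//
//	encoded := `{"Type":"email:send","Payload":{"user_id":42},"Queue":"default","Retry":25}`
//	msg, err := UnmarshalOldMessage(encoded)
//	// err == nil; msg.Payload == []byte(`{"user_id":42}`)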
func updatePendingMessages(r *rdb.RDB, qname string) {
data, err := r.Client().LRange(backupKey(base.PendingKey(qname)), 0, -1).Result()
failIfError(err, "Failed to read backup pending key")
for _, s := range data {
msg, err := UnmarshalOldMessage(s)
failIfError(err, "Failed to unmarshal message")
if msg.UniqueKey != "" {
ttl, err := r.Client().TTL(msg.UniqueKey).Result()
failIfError(err, "Failed to get ttl")
if ttl > 0 {
err = r.Client().Del(msg.UniqueKey).Err()
logIfError(err, "Failed to delete unique key")
}
// Regenerate unique key.
msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
if ttl > 0 {
err = r.EnqueueUnique(msg, ttl)
} else {
err = r.Enqueue(msg)
}
failIfError(err, "Failed to enqueue message")
} else {
err := r.Enqueue(msg)
failIfError(err, "Failed to enqueue message")
}
}
}
// KEYS[1] -> asynq:{<qname>}:t:<task_id>
// KEYS[2] -> asynq:{<qname>}:scheduled
// ARGV[1] -> task message data
// ARGV[2] -> zset score
// ARGV[3] -> task ID
// ARGV[4] -> task timeout in seconds (0 if no timeout)
// ARGV[5] -> task deadline in unix time (0 if no deadline)
// ARGV[6] -> task state (e.g. "retry", "archived")
var taskZAddCmd = redis.NewScript(`
redis.call("HSET", KEYS[1],
"msg", ARGV[1],
"state", ARGV[6],
"timeout", ARGV[4],
"deadline", ARGV[5])
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[3])
return 1
`)
// ZAddTask adds the given task message to the zset under the given key.
func ZAddTask(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string) error {
// Special case: the LastFailedAt field is new, so infer its value from the zset score.
if state == "archived" {
msg.LastFailedAt = int64(score)
}
encoded, err := base.EncodeMessage(msg)
if err != nil {
return err
}
if err := c.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
return err
}
keys := []string{
base.TaskKey(msg.Queue, msg.ID.String()),
key,
}
argv := []interface{}{
encoded,
score,
msg.ID.String(),
msg.Timeout,
msg.Deadline,
state,
}
return taskZAddCmd.Run(c, keys, argv...).Err()
}
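// Example call (mirroring updateZSetMessages below): re-adding a retry task
// under the new schema, keyed by its original zset score.
//
//	err := ZAddTask(c, base.RetryKey(msg.Queue), msg, z.Score, "retry")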
// KEYS[1] -> unique key
// KEYS[2] -> asynq:{<qname>}:t:<task_id>
// KEYS[3] -> zset key (e.g. asynq:{<qname>}:scheduled)
// --
// ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> score (process_at timestamp)
// ARGV[4] -> task message
// ARGV[5] -> task timeout in seconds (0 if no timeout)
// ARGV[6] -> task deadline in unix time (0 if no deadline)
// ARGV[7] -> task state (oneof "scheduled", "retry", "archived")
var taskZAddUniqueCmd = redis.NewScript(`
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
if not ok then
return 0
end
redis.call("HSET", KEYS[2],
"msg", ARGV[4],
"state", ARGV[7],
"timeout", ARGV[5],
"deadline", ARGV[6],
"unique_key", KEYS[1])
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1])
return 1
`)
// ZAddTaskUnique adds the task to the given zset if the uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired.
func ZAddTaskUnique(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string, ttl time.Duration) error {
encoded, err := base.EncodeMessage(msg)
if err != nil {
return err
}
if err := c.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
return err
}
keys := []string{
msg.UniqueKey,
base.TaskKey(msg.Queue, msg.ID.String()),
key,
}
argv := []interface{}{
msg.ID.String(),
int(ttl.Seconds()),
score,
encoded,
msg.Timeout,
msg.Deadline,
state,
}
res, err := taskZAddUniqueCmd.Run(c, keys, argv...).Result()
if err != nil {
return err
}
n, ok := res.(int64)
if !ok {
return errors.E(errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
}
if n == 0 {
return errors.E(errors.AlreadyExists, errors.ErrDuplicateTask)
}
return nil
}
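// Note: a hypothetical caller could tell a lost uniqueness lock apart from a
// hard failure (this assumes the internal errors package supports errors.Is):
//
//	err := ZAddTaskUnique(c, key, msg, score, "scheduled", ttl)
//	if errors.Is(err, errors.ErrDuplicateTask) {
//		// another task holds the lock; skip re-enqueueing
//	}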
func updateZSetMessages(c redis.UniversalClient, key, state string) {
zs, err := c.ZRangeWithScores(backupKey(key), 0, -1).Result()
failIfError(err, "Failed to read")
for _, z := range zs {
msg, err := UnmarshalOldMessage(z.Member.(string))
failIfError(err, "Failed to unmarshal message")
if msg.UniqueKey != "" {
ttl, err := c.TTL(msg.UniqueKey).Result()
failIfError(err, "Failed to get ttl")
if ttl > 0 {
err = c.Del(msg.UniqueKey).Err()
logIfError(err, "Failed to delete unique key")
}
// Regenerate unique key.
msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
if ttl > 0 {
err = ZAddTaskUnique(c, key, msg, z.Score, state, ttl)
} else {
err = ZAddTask(c, key, msg, z.Score, state)
}
failIfError(err, "Failed to zadd message")
} else {
err := ZAddTask(c, key, msg, z.Score, state)
failIfError(err, "Failed to enqueue scheduled message")
}
}
}


@@ -145,12 +145,13 @@ func printQueueInfo(info *asynq.QueueInfo) {
 	bold.Println("Queue Info")
 	fmt.Printf("Name: %s\n", info.Queue)
 	fmt.Printf("Size: %d\n", info.Size)
+	fmt.Printf("Groups: %d\n", info.Groups)
 	fmt.Printf("Paused: %t\n\n", info.Paused)
 	bold.Println("Task Count by State")
 	printTable(
-		[]string{"active", "pending", "scheduled", "retry", "archived"},
+		[]string{"active", "pending", "aggregating", "scheduled", "retry", "archived", "completed"},
 		func(w io.Writer, tmpl string) {
-			fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived)
+			fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Aggregating, info.Scheduled, info.Retry, info.Archived, info.Completed)
 		},
 	)
 	fmt.Println()


@@ -14,7 +14,7 @@ import (
 	"unicode"
 	"unicode/utf8"
-	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v8"
 	"github.com/hibiken/asynq"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/rdb"
@@ -199,9 +199,9 @@ func printTable(cols []string, printRows func(w io.Writer, tmpl string)) {
 	tw.Flush()
 }
-// formatPayload returns string representation of payload if data is printable.
-// If data is not printable, it returns a string describing payload is not printable.
-func formatPayload(payload []byte) string {
+// sprintBytes returns a string representation of the given byte slice if data is printable.
+// If data is not printable, it returns a string describing it is not printable.
+func sprintBytes(payload []byte) string {
 	if !isPrintable(payload) {
 		return "non-printable bytes"
 	}


@@ -5,13 +5,16 @@
 package cmd
 import (
+	"encoding/json"
 	"fmt"
 	"io"
+	"math"
 	"os"
 	"strconv"
 	"strings"
 	"text/tabwriter"
 	"time"
+	"unicode/utf8"
 	"github.com/fatih/color"
 	"github.com/hibiken/asynq/internal/rdb"
@@ -38,8 +41,11 @@ Example: watch -n 3 asynq stats -> Shows current state of tasks every three seconds.
 	Run: stats,
 }
+
+var jsonFlag bool
 func init() {
 	rootCmd.AddCommand(statsCmd)
+	statsCmd.Flags().BoolVar(&jsonFlag, "json", false, "Output stats in JSON format.")
 	// Here you will define your flags and configuration settings.
@@ -53,14 +59,22 @@ func init() {
 }
 type AggregateStats struct {
-	Active    int
-	Pending   int
-	Scheduled int
-	Retry     int
-	Archived  int
-	Processed int
-	Failed    int
-	Timestamp time.Time
+	Active      int       `json:"active"`
+	Pending     int       `json:"pending"`
+	Aggregating int       `json:"aggregating"`
+	Scheduled   int       `json:"scheduled"`
+	Retry       int       `json:"retry"`
+	Archived    int       `json:"archived"`
+	Completed   int       `json:"completed"`
+	Processed   int       `json:"processed"`
+	Failed      int       `json:"failed"`
+	Timestamp   time.Time `json:"timestamp"`
+}
+
+type FullStats struct {
+	Aggregate  AggregateStats    `json:"aggregate"`
+	QueueStats []*rdb.Stats      `json:"queues"`
+	RedisInfo  map[string]string `json:"redis"`
 }
 func stats(cmd *cobra.Command, args []string) {
@@ -82,9 +96,11 @@ func stats(cmd *cobra.Command, args []string) {
 	}
 	aggStats.Active += s.Active
 	aggStats.Pending += s.Pending
+	aggStats.Aggregating += s.Aggregating
 	aggStats.Scheduled += s.Scheduled
 	aggStats.Retry += s.Retry
 	aggStats.Archived += s.Archived
+	aggStats.Completed += s.Completed
 	aggStats.Processed += s.Processed
 	aggStats.Failed += s.Failed
 	aggStats.Timestamp = s.Timestamp
@@ -100,6 +116,23 @@ func stats(cmd *cobra.Command, args []string) {
 		fmt.Println(err)
 		os.Exit(1)
 	}
+	if jsonFlag {
+		statsJSON, err := json.Marshal(FullStats{
+			Aggregate:  aggStats,
+			QueueStats: stats,
+			RedisInfo:  info,
+		})
+		if err != nil {
+			fmt.Println(err)
+			os.Exit(1)
+		}
+		fmt.Println(string(statsJSON))
+		return
+	}
 	bold := color.New(color.Bold)
 	bold.Println("Task Count by State")
 	printStatsByState(&aggStats)
@@ -124,22 +157,50 @@ func stats(cmd *cobra.Command, args []string) {
 }
 func printStatsByState(s *AggregateStats) {
-	format := strings.Repeat("%v\t", 5) + "\n"
+	format := strings.Repeat("%v\t", 7) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
-	fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived")
-	fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----")
-	fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived)
+	fmt.Fprintf(tw, format, "active", "pending", "aggregating", "scheduled", "retry", "archived", "completed")
+	width := maxInt(9 /* defaultWidth */, maxWidthOf(s.Active, s.Pending, s.Aggregating, s.Scheduled, s.Retry, s.Archived, s.Completed)) // length of widest column
+	sep := strings.Repeat("-", width)
+	fmt.Fprintf(tw, format, sep, sep, sep, sep, sep, sep, sep)
+	fmt.Fprintf(tw, format, s.Active, s.Pending, s.Aggregating, s.Scheduled, s.Retry, s.Archived, s.Completed)
 	tw.Flush()
 }
+
+// numDigits returns the number of digits in n.
+func numDigits(n int) int {
+	return len(strconv.Itoa(n))
+}
+
+// maxWidthOf returns the max number of digits among the provided vals.
+func maxWidthOf(vals ...int) int {
+	max := 0
+	for _, v := range vals {
+		if vw := numDigits(v); vw > max {
+			max = vw
+		}
+	}
+	return max
+}
+
+func maxInt(a, b int) int {
+	return int(math.Max(float64(a), float64(b)))
+}
 func printStatsByQueue(stats []*rdb.Stats) {
 	var headers, seps, counts []string
+	maxHeaderWidth := 0
 	for _, s := range stats {
 		title := queueTitle(s)
 		headers = append(headers, title)
-		seps = append(seps, strings.Repeat("-", len(title)))
+		if w := utf8.RuneCountInString(title); w > maxHeaderWidth {
+			maxHeaderWidth = w
+		}
 		counts = append(counts, strconv.Itoa(s.Size))
 	}
+	for i := 0; i < len(headers); i++ {
+		seps = append(seps, strings.Repeat("-", maxHeaderWidth))
+	}
 	format := strings.Repeat("%v\t", len(headers)) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
 	fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)


@@ -22,6 +22,7 @@ func init() {
 	taskListCmd.Flags().StringP("state", "s", "", "state of the tasks to inspect")
 	taskListCmd.Flags().Int("page", 1, "page number")
 	taskListCmd.Flags().Int("size", 30, "page size")
+	taskListCmd.Flags().StringP("group", "g", "", "group to inspect (required for listing aggregating tasks)")
 	taskListCmd.MarkFlagRequired("queue")
 	taskListCmd.MarkFlagRequired("state")
@@ -83,20 +84,29 @@ var taskListCmd = &cobra.Command{
 The value for the state flag should be one of:
 - active
 - pending
+- aggregating
 - scheduled
 - retry
 - archived
+- completed
 List operation paginates the result set.
 By default, the command fetches the first 30 tasks.
 Use --page and --size flags to specify the page number and size.
 Example:
 To list pending tasks from "default" queue, run
   asynq task ls --queue=default --state=pending
 To list the tasks from the second page, run
-  asynq task ls --queue=default --state=pending --page=1`,
+  asynq task ls --queue=default --state=pending --page=1
+
+For aggregating tasks, an additional --group flag is required.
+Example:
+  asynq task ls --queue=default --state=aggregating --group=mygroup
+`,
 	Run: taskList,
 }
@@ -189,6 +199,19 @@ func taskList(cmd *cobra.Command, args []string) {
 		listRetryTasks(qname, pageNum, pageSize)
 	case "archived":
 		listArchivedTasks(qname, pageNum, pageSize)
+	case "completed":
+		listCompletedTasks(qname, pageNum, pageSize)
+	case "aggregating":
+		group, err := cmd.Flags().GetString("group")
+		if err != nil {
+			fmt.Println(err)
+			os.Exit(1)
+		}
+		if group == "" {
+			fmt.Println("Flag --group is required for listing aggregating tasks")
+			os.Exit(1)
+		}
+		listAggregatingTasks(qname, group, pageNum, pageSize)
 	default:
 		fmt.Printf("error: state=%q is not supported\n", state)
 		os.Exit(1)
@@ -210,7 +233,7 @@ func listActiveTasks(qname string, pageNum, pageSize int) {
 		[]string{"ID", "Type", "Payload"},
 		func(w io.Writer, tmpl string) {
 			for _, t := range tasks {
-				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload))
+				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
 			}
 		},
 	)
@@ -231,7 +254,7 @@ func listPendingTasks(qname string, pageNum, pageSize int) {
 		[]string{"ID", "Type", "Payload"},
 		func(w io.Writer, tmpl string) {
 			for _, t := range tasks {
-				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload))
+				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
 			}
 		},
 	)
@@ -252,7 +275,7 @@ func listScheduledTasks(qname string, pageNum, pageSize int) {
 		[]string{"ID", "Type", "Payload", "Process In"},
 		func(w io.Writer, tmpl string) {
 			for _, t := range tasks {
-				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt))
+				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt))
 			}
 		},
 	)
@@ -284,8 +307,8 @@ func listRetryTasks(qname string, pageNum, pageSize int) {
 		[]string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Last Failed", "Retried", "Max Retry"},
 		func(w io.Writer, tmpl string) {
 			for _, t := range tasks {
-				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt),
-					t.LastErr, formatLastFailedAt(t.LastFailedAt), t.Retried, t.MaxRetry)
+				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt),
+					t.LastErr, formatPastTime(t.LastFailedAt), t.Retried, t.MaxRetry)
 			}
 		},
 	)
@@ -306,11 +329,52 @@ func listArchivedTasks(qname string, pageNum, pageSize int) {
 		[]string{"ID", "Type", "Payload", "Last Failed", "Last Error"},
 		func(w io.Writer, tmpl string) {
 			for _, t := range tasks {
-				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatLastFailedAt(t.LastFailedAt), t.LastErr)
+				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.LastFailedAt), t.LastErr)
 			}
 		})
 }
+
+func listCompletedTasks(qname string, pageNum, pageSize int) {
+	i := createInspector()
+	tasks, err := i.ListCompletedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	if len(tasks) == 0 {
+		fmt.Printf("No completed tasks in %q queue\n", qname)
+		return
+	}
+	printTable(
+		[]string{"ID", "Type", "Payload", "CompletedAt", "Result"},
+		func(w io.Writer, tmpl string) {
+			for _, t := range tasks {
+				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.CompletedAt), sprintBytes(t.Result))
+			}
+		})
+}
+
+func listAggregatingTasks(qname, group string, pageNum, pageSize int) {
+	i := createInspector()
+	tasks, err := i.ListAggregatingTasks(qname, group, asynq.PageSize(pageSize), asynq.Page(pageNum))
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	if len(tasks) == 0 {
+		fmt.Printf("No aggregating tasks in group %q\n", group)
+		return
+	}
+	printTable(
+		[]string{"ID", "Type", "Payload", "Group"},
+		func(w io.Writer, tmpl string) {
+			for _, t := range tasks {
+				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), t.Group)
+			}
+		},
+	)
+}
 func taskCancel(cmd *cobra.Command, args []string) {
 	i := createInspector()
 	for _, id := range args {
@@ -356,7 +420,7 @@ func printTaskInfo(info *asynq.TaskInfo) {
 	if len(info.LastErr) != 0 {
 		fmt.Println()
 		bold.Println("Last Failure")
-		fmt.Printf("Failed at: %s\n", formatLastFailedAt(info.LastFailedAt))
+		fmt.Printf("Failed at: %s\n", formatPastTime(info.LastFailedAt))
 		fmt.Printf("Error message: %s\n", info.LastErr)
 	}
 }
@@ -371,11 +435,12 @@ func formatNextProcessAt(processAt time.Time) string {
 	return fmt.Sprintf("%s (in %v)", processAt.Format(time.UnixDate), processAt.Sub(time.Now()).Round(time.Second))
 }
-func formatLastFailedAt(lastFailedAt time.Time) string {
-	if lastFailedAt.IsZero() || lastFailedAt.Unix() == 0 {
+// formatPastTime takes a time t in the past and returns a user-friendly string.
+func formatPastTime(t time.Time) string {
+	if t.IsZero() || t.Unix() == 0 {
 		return ""
 	}
-	return lastFailedAt.Format(time.UnixDate)
+	return t.Format(time.UnixDate)
 }
 func taskArchive(cmd *cobra.Command, args []string) {
@@ -496,6 +561,8 @@ func taskDeleteAll(cmd *cobra.Command, args []string) {
 		n, err = i.DeleteAllRetryTasks(qname)
 	case "archived":
 		n, err = i.DeleteAllArchivedTasks(qname)
+	case "completed":
+		n, err = i.DeleteAllCompletedTasks(qname)
 	default:
 		fmt.Printf("error: unsupported state %q\n", state)
 		os.Exit(1)


@@ -3,21 +3,15 @@ module github.com/hibiken/asynq/tools
 go 1.13
 require (
-	github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect
-	github.com/coreos/go-etcd v2.0.0+incompatible // indirect
-	github.com/cpuguy83/go-md2man v1.0.10 // indirect
 	github.com/fatih/color v1.9.0
-	github.com/go-redis/redis/v7 v7.4.0
-	github.com/golang/protobuf v1.4.1 // indirect
-	github.com/google/uuid v1.2.0
-	github.com/hibiken/asynq v0.17.1
+	github.com/go-redis/redis/v8 v8.11.4
+	github.com/hibiken/asynq v0.21.0
+	github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d
 	github.com/mitchellh/go-homedir v1.1.0
-	github.com/spf13/cast v1.3.1
-	github.com/spf13/afero v1.1.2 // indirect
+	github.com/prometheus/client_golang v1.11.0
 	github.com/spf13/cobra v1.1.1
 	github.com/spf13/viper v1.7.0
-	github.com/ugorji/go v1.1.4 // indirect
-	github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect
-	github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 // indirect
 )
-replace github.com/hibiken/asynq => ./..
+replace github.com/hibiken/asynq => ../


@@ -16,50 +16,60 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
-github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
-github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
-github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
+github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
+github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
+github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -69,36 +79,41 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
 github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
@@ -124,26 +139,34 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hibiken/asynq v0.19.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
+github.com/hibiken/asynq v0.21.0 h1:uH9XogJhjq/S39E0/DEPWLZQ6hHJ73UiblZTe4RzHwA=
+github.com/hibiken/asynq v0.21.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
+github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d h1:Er+U+9PmnyRHRDQjSjRQ24HoWvOY7w9Pk7bUPYM3Ags=
+github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d/go.mod h1:VmxwMfMKyb6gyv8xG0oOBMXIhquWKPx+zPtbVBd2Q1s=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -153,6 +176,7 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
 github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -165,43 +189,70 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
+github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
+github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
@@ -213,49 +264,43 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -274,6 +319,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -287,11 +333,15 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -300,6 +350,9 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -307,23 +360,37 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -345,9 +412,13 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -369,7 +440,6 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -379,16 +449,18 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -399,10 +471,13 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -0,0 +1,56 @@
package main

import (
	"flag"
	"fmt"
	"log"
	"net/http"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/x/metrics"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Declare command-line flags.
// These variables are bound to flags in init().
var (
	flagRedisAddr     string
	flagRedisDB       int
	flagRedisPassword string
	flagRedisUsername string
	flagPort          int
)

func init() {
	flag.StringVar(&flagRedisAddr, "redis-addr", "127.0.0.1:6379", "host:port of redis server to connect to")
	flag.IntVar(&flagRedisDB, "redis-db", 0, "redis DB number to use")
	flag.StringVar(&flagRedisPassword, "redis-password", "", "password used to connect to redis server")
	flag.StringVar(&flagRedisUsername, "redis-username", "", "username used to connect to redis server")
	flag.IntVar(&flagPort, "port", 9876, "port to use for the HTTP server")
}

func main() {
	flag.Parse()
	// Using NewPedanticRegistry here to test the implementation of Collectors and Metrics.
	reg := prometheus.NewPedanticRegistry()

	inspector := asynq.NewInspector(asynq.RedisClientOpt{
		Addr:     flagRedisAddr,
		DB:       flagRedisDB,
		Password: flagRedisPassword,
		Username: flagRedisUsername,
	})

	reg.MustRegister(
		metrics.NewQueueMetricsCollector(inspector),
		// Add the standard process and go metrics to the registry.
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		collectors.NewGoCollector(),
	)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Printf("exporter server is listening on port: %d\n", flagPort)
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", flagPort), nil))
}
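
A quick way to sanity-check the exporter above is to scrape its /metrics endpoint once, the same way a Prometheus server would. The sketch below is not part of this diff; it is an illustrative client that assumes the exporter is already running locally on its default port 9876.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Perform a one-off scrape against the exporter's /metrics endpoint
	// (hypothetical local address; the exporter defaults to port 9876).
	resp, err := http.Get("http://localhost:9876/metrics")
	if err != nil {
		log.Fatalf("failed to scrape exporter: %v", err)
	}
	defer resp.Body.Close()

	// The body is Prometheus text exposition format: the queue metrics
	// gathered via the Inspector plus the standard process/go metrics.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("failed to read response: %v", err)
	}
	fmt.Print(string(body))
}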

x/go.mod Normal file

@@ -0,0 +1,10 @@
module github.com/hibiken/asynq/x

go 1.16

require (
	github.com/go-redis/redis/v8 v8.11.4
	github.com/google/uuid v1.3.0
	github.com/hibiken/asynq v0.21.0
	github.com/prometheus/client_golang v1.11.0
)
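
Note that the x module pins a released asynq version. When developing the x packages in-tree against unreleased asynq changes, the standard Go workflow is a replace directive pointing the dependency at the local checkout; a hedged sketch (the relative path assumes the x directory sits inside the asynq repository):

replace github.com/hibiken/asynq => ../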

x/go.sum Normal file

@@ -0,0 +1,258 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hibiken/asynq v0.21.0 h1:uH9XogJhjq/S39E0/DEPWLZQ6hHJ73UiblZTe4RzHwA=
github.com/hibiken/asynq v0.21.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

x/metrics/metrics.go Normal file

@@ -0,0 +1,190 @@
// Package metrics provides implementations of prometheus.Collector to collect Asynq queue metrics.
package metrics
import (
"fmt"
"log"
"github.com/hibiken/asynq"
"github.com/prometheus/client_golang/prometheus"
)
// Namespace used in fully-qualified metrics names.
const namespace = "asynq"
// QueueMetricsCollector gathers queue metrics.
// It implements the prometheus.Collector interface.
//
// All metrics exported from this collector have prefix "asynq".
type QueueMetricsCollector struct {
inspector *asynq.Inspector
}
// collectQueueInfo gathers QueueInfo of all queues.
// Since this operation is expensive, it should be called at most once per collection.
func (qmc *QueueMetricsCollector) collectQueueInfo() ([]*asynq.QueueInfo, error) {
qnames, err := qmc.inspector.Queues()
if err != nil {
return nil, fmt.Errorf("failed to get queue names: %v", err)
}
infos := make([]*asynq.QueueInfo, len(qnames))
for i, qname := range qnames {
qinfo, err := qmc.inspector.GetQueueInfo(qname)
if err != nil {
return nil, fmt.Errorf("failed to get queue info: %v", err)
}
infos[i] = qinfo
}
return infos, nil
}
// Descriptors used by QueueMetricsCollector
var (
tasksQueuedDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "tasks_enqueued_total"),
"Number of tasks enqueued; broken down by queue and state.",
[]string{"queue", "state"}, nil,
)
queueSizeDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "queue_size"),
"Number of tasks in a queue",
[]string{"queue"}, nil,
)
queueLatencyDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "queue_latency_seconds"),
"Number of seconds the oldest pending task is waiting in pending state to be processed.",
[]string{"queue"}, nil,
)
queueMemUsgDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "queue_memory_usage_approx_bytes"),
"Number of memory used by a given queue (approximated number by sampling).",
[]string{"queue"}, nil,
)
tasksProcessedTotalDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "tasks_processed_total"),
"Number of tasks processed (both succeeded and failed); broken down by queue",
[]string{"queue"}, nil,
)
tasksFailedTotalDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "tasks_failed_total"),
"Number of tasks failed; broken down by queue",
[]string{"queue"}, nil,
)
pausedQueues = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "queue_paused_total"),
"Number of queues paused",
[]string{"queue"}, nil,
)
)
func (qmc *QueueMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(qmc, ch)
}
func (qmc *QueueMetricsCollector) Collect(ch chan<- prometheus.Metric) {
queueInfos, err := qmc.collectQueueInfo()
if err != nil {
log.Printf("Failed to collect metrics data: %v", err)
}
for _, info := range queueInfos {
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Active),
info.Queue,
"active",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Pending),
info.Queue,
"pending",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Scheduled),
info.Queue,
"scheduled",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Retry),
info.Queue,
"retry",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Archived),
info.Queue,
"archived",
)
ch <- prometheus.MustNewConstMetric(
tasksQueuedDesc,
prometheus.GaugeValue,
float64(info.Completed),
info.Queue,
"completed",
)
ch <- prometheus.MustNewConstMetric(
queueSizeDesc,
prometheus.GaugeValue,
float64(info.Size),
info.Queue,
)
ch <- prometheus.MustNewConstMetric(
queueLatencyDesc,
prometheus.GaugeValue,
info.Latency.Seconds(),
info.Queue,
)
ch <- prometheus.MustNewConstMetric(
queueMemUsgDesc,
prometheus.GaugeValue,
float64(info.MemoryUsage),
info.Queue,
)
ch <- prometheus.MustNewConstMetric(
tasksProcessedTotalDesc,
prometheus.CounterValue,
float64(info.ProcessedTotal),
info.Queue,
)
ch <- prometheus.MustNewConstMetric(
tasksFailedTotalDesc,
prometheus.CounterValue,
float64(info.FailedTotal),
info.Queue,
)
pausedValue := 0 // zero to indicate "not paused"
if info.Paused {
pausedValue = 1
}
ch <- prometheus.MustNewConstMetric(
pausedQueues,
prometheus.GaugeValue,
float64(pausedValue),
info.Queue,
)
}
}
// NewQueueMetricsCollector returns a collector that exports metrics about Asynq queues.
func NewQueueMetricsCollector(inspector *asynq.Inspector) *QueueMetricsCollector {
return &QueueMetricsCollector{inspector: inspector}
}
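To expose these metrics, the collector can be registered with a Prometheus registry and served over HTTP. The following is a minimal sketch, not part of this commit, assuming the standard promhttp handler from github.com/prometheus/client_golang; the listen address is hypothetical.

package main

import (
	"log"
	"net/http"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/x/metrics"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: ":6379"})
	reg := prometheus.NewRegistry()
	// Register the queue metrics collector defined above; Collect is driven by scrapes.
	reg.MustRegister(metrics.NewQueueMetricsCollector(inspector))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":2112", nil)) // hypothetical port
}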

x/rate/example_test.go Normal file

@@ -0,0 +1,40 @@
package rate_test
import (
"context"
"fmt"
"time"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/x/rate"
)
type RateLimitError struct {
RetryIn time.Duration
}
func (e *RateLimitError) Error() string {
return fmt.Sprintf("rate limited (retry in %v)", e.RetryIn)
}
func ExampleNewSemaphore() {
redisConnOpt := asynq.RedisClientOpt{Addr: ":6379"}
sema := rate.NewSemaphore(redisConnOpt, "my_queue", 10)
// call sema.Close() when appropriate
_ = asynq.HandlerFunc(func(ctx context.Context, task *asynq.Task) error {
ok, err := sema.Acquire(ctx)
if err != nil {
return err
}
if !ok {
return &RateLimitError{RetryIn: 30 * time.Second}
}
// Make sure to release the token once we're done.
defer sema.Release(ctx)
// Process task
return nil
})
}
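The RateLimitError returned by the handler above can also be wired into the server's retry policy so that rate-limited tasks are retried after RetryIn without counting as failures. The following is a sketch using asynq's Config.IsFailure and Config.RetryDelayFunc hooks; newRateLimitAwareServer is a hypothetical helper, not part of this commit, and assumes the errors package is imported alongside the imports shown above.

// newRateLimitAwareServer is a hypothetical helper showing one way to honor
// RateLimitError.RetryIn in the server's retry policy.
func newRateLimitAwareServer(redisConnOpt asynq.RedisConnOpt) *asynq.Server {
	return asynq.NewServer(redisConnOpt, asynq.Config{
		// Treat rate-limited runs as non-failures so they don't count
		// against the task's failure stats.
		IsFailure: func(err error) bool {
			var rlErr *RateLimitError
			return !errors.As(err, &rlErr)
		},
		// Retry after the hinted duration; fall back to the default backoff.
		RetryDelayFunc: func(n int, err error, task *asynq.Task) time.Duration {
			var rlErr *RateLimitError
			if errors.As(err, &rlErr) {
				return rlErr.RetryIn
			}
			return asynq.DefaultRetryDelayFunc(n, err, task)
		},
	})
}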

x/rate/semaphore.go Normal file

@@ -0,0 +1,114 @@
// Package rate contains rate limiting strategies for asynq.Handler(s).
package rate
import (
"context"
"fmt"
"strings"
"time"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq"
asynqcontext "github.com/hibiken/asynq/internal/context"
)
// NewSemaphore creates a counting Semaphore for the given scope with the given number of tokens.
func NewSemaphore(rco asynq.RedisConnOpt, scope string, maxTokens int) *Semaphore {
rc, ok := rco.MakeRedisClient().(redis.UniversalClient)
if !ok {
panic(fmt.Sprintf("rate.NewSemaphore: unsupported RedisConnOpt type %T", rco))
}
if maxTokens < 1 {
panic("rate.NewSemaphore: maxTokens cannot be less than 1")
}
if len(strings.TrimSpace(scope)) == 0 {
panic("rate.NewSemaphore: scope should not be empty")
}
return &Semaphore{
rc: rc,
scope: scope,
maxTokens: maxTokens,
}
}
// Semaphore is a distributed counting semaphore that can be used to enforce a limit of maxTokens concurrent operations across multiple asynq servers.
type Semaphore struct {
rc redis.UniversalClient
maxTokens int
scope string
}
// KEYS[1] -> asynq:sema:<scope>
// ARGV[1] -> max concurrency
// ARGV[2] -> current time in unix time
// ARGV[3] -> deadline in unix time
// ARGV[4] -> task ID
var acquireCmd = redis.NewScript(`
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", tonumber(ARGV[2])-1)
local count = redis.call("ZCARD", KEYS[1])
if (count < tonumber(ARGV[1])) then
redis.call("ZADD", KEYS[1], ARGV[3], ARGV[4])
return 'true'
else
return 'false'
end
`)
// Acquire attempts to acquire a token from the semaphore.
// - Returns (true, nil) if a token was acquired, i.e. the current token count is less than maxTokens.
// - Returns (false, nil) if a token cannot be acquired.
// - Returns (false, error) otherwise.
//
// The context.Context passed to Acquire must have a deadline set;
// this ensures that the token is eventually reclaimed even if the job goroutine crashes and never calls Release.
func (s *Semaphore) Acquire(ctx context.Context) (bool, error) {
d, ok := ctx.Deadline()
if !ok {
return false, fmt.Errorf("provided context must have a deadline")
}
taskID, ok := asynqcontext.GetTaskID(ctx)
if !ok {
return false, fmt.Errorf("provided context is missing task ID value")
}
return acquireCmd.Run(ctx, s.rc,
[]string{semaphoreKey(s.scope)},
s.maxTokens,
time.Now().Unix(),
d.Unix(),
taskID,
).Bool()
}
// Release will release the token on the counting semaphore.
func (s *Semaphore) Release(ctx context.Context) error {
taskID, ok := asynqcontext.GetTaskID(ctx)
if !ok {
return fmt.Errorf("provided context is missing task ID value")
}
n, err := s.rc.ZRem(ctx, semaphoreKey(s.scope), taskID).Result()
if err != nil {
return fmt.Errorf("redis command failed: %v", err)
}
if n == 0 {
return fmt.Errorf("no token found for task %q", taskID)
}
return nil
}
// Close closes the connection to redis.
func (s *Semaphore) Close() error {
return s.rc.Close()
}
func semaphoreKey(scope string) string {
return fmt.Sprintf("asynq:sema:%s", scope)
}
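Because the semaphore is just a sorted set at asynq:sema:<scope> whose members are task IDs scored by their context deadlines (in unix seconds), current token holders can be inspected directly. Below is a minimal diagnostic sketch, not part of this commit, assuming go-redis v8 and a scope named "my_queue":

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	rc := redis.NewClient(&redis.Options{Addr: ":6379"})
	defer rc.Close()

	ctx := context.Background()
	// Each member is a task ID; its score is the holder's deadline in unix seconds.
	holders, err := rc.ZRangeWithScores(ctx, "asynq:sema:my_queue", 0, -1).Result()
	if err != nil {
		panic(err)
	}
	for _, z := range holders {
		fmt.Printf("task %v holds a token until %v\n", z.Member, time.Unix(int64(z.Score), 0))
	}
}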

x/rate/semaphore_test.go Normal file

@@ -0,0 +1,408 @@
package rate
import (
"context"
"flag"
"fmt"
"strings"
"testing"
"time"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/internal/base"
asynqcontext "github.com/hibiken/asynq/internal/context"
)
var (
redisAddr string
redisDB int
useRedisCluster bool
redisClusterAddrs string // comma-separated list of host:port
)
func init() {
flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing")
flag.IntVar(&redisDB, "redis_db", 14, "redis db number to use in testing")
flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing")
flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses")
}
func TestNewSemaphore(t *testing.T) {
tests := []struct {
desc string
name string
maxConcurrency int
wantPanic string
connOpt asynq.RedisConnOpt
}{
{
desc: "Bad RedisConnOpt",
wantPanic: "rate.NewSemaphore: unsupported RedisConnOpt type *rate.badConnOpt",
connOpt: &badConnOpt{},
},
{
desc: "Zero maxTokens should panic",
wantPanic: "rate.NewSemaphore: maxTokens cannot be less than 1",
},
{
desc: "Empty scope should panic",
maxConcurrency: 2,
name: " ",
wantPanic: "rate.NewSemaphore: scope should not be empty",
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
if tt.wantPanic != "" {
defer func() {
if r := recover(); r.(string) != tt.wantPanic {
t.Errorf("%s;\nNewSemaphore should panic with msg: %s, got %s", tt.desc, tt.wantPanic, r.(string))
}
}()
}
opt := tt.connOpt
if tt.connOpt == nil {
opt = getRedisConnOpt(t)
}
sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
defer sema.Close()
})
}
}
func TestNewSemaphore_Acquire(t *testing.T) {
tests := []struct {
desc string
name string
maxConcurrency int
taskIDs []string
ctxFunc func(string) (context.Context, context.CancelFunc)
want []bool
}{
{
desc: "Should acquire token when current token count is less than maxTokens",
name: "task-1",
maxConcurrency: 3,
taskIDs: []string{uuid.NewString(), uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: id,
Queue: "task-1",
}, time.Now().Add(time.Second))
},
want: []bool{true, true},
},
{
desc: "Should fail acquiring token when current token count is equal to maxTokens",
name: "task-2",
maxConcurrency: 3,
taskIDs: []string{uuid.NewString(), uuid.NewString(), uuid.NewString(), uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: id,
Queue: "task-2",
}, time.Now().Add(time.Second))
},
want: []bool{true, true, true, false},
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
}
sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
defer sema.Close()
for i := 0; i < len(tt.taskIDs); i++ {
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
got, err := sema.Acquire(ctx)
if err != nil {
t.Errorf("%s;\nSemaphore.Acquire() got error %v", tt.desc, err)
}
if got != tt.want[i] {
t.Errorf("%s;\nSemaphore.Acquire(ctx) returned %v, want %v", tt.desc, got, tt.want[i])
}
cancel()
}
})
}
}
func TestNewSemaphore_Acquire_Error(t *testing.T) {
tests := []struct {
desc string
name string
maxConcurrency int
taskIDs []string
ctxFunc func(string) (context.Context, context.CancelFunc)
errStr string
}{
{
desc: "Should return error if context has no deadline",
name: "task-3",
maxConcurrency: 1,
taskIDs: []string{uuid.NewString(), uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return context.Background(), func() {}
},
errStr: "provided context must have a deadline",
},
{
desc: "Should return error when context is missing taskID",
name: "task-4",
maxConcurrency: 1,
taskIDs: []string{uuid.NewString()},
ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), time.Second)
},
errStr: "provided context is missing task ID value",
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
}
sema := NewSemaphore(opt, tt.name, tt.maxConcurrency)
defer sema.Close()
for i := 0; i < len(tt.taskIDs); i++ {
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
_, err := sema.Acquire(ctx)
if err == nil || err.Error() != tt.errStr {
t.Errorf("%s;\nSemaphore.Acquire() got error %v want error %v", tt.desc, err, tt.errStr)
}
cancel()
}
})
}
}
func TestNewSemaphore_Acquire_StaleToken(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
taskID := uuid.NewString()
// Add a member to the sorted set to mimic the case where a token was acquired but the
// goroutine crashed, so the token was never explicitly removed and is still present.
rc.ZAdd(context.Background(), semaphoreKey("stale-token"), &redis.Z{
Score: float64(time.Now().Add(-10 * time.Second).Unix()),
Member: taskID,
})
sema := NewSemaphore(opt, "stale-token", 1)
defer sema.Close()
ctx, cancel := asynqcontext.New(&base.TaskMessage{
ID: taskID,
Queue: "task-1",
}, time.Now().Add(time.Second))
defer cancel()
got, err := sema.Acquire(ctx)
if err != nil {
t.Errorf("Acquire_StaleToken;\nSemaphore.Acquire() got error %v", err)
}
if !got {
t.Error("Acquire_StaleToken;\nSemaphore.Acquire() got false want true")
}
}
func TestNewSemaphore_Release(t *testing.T) {
tests := []struct {
desc string
name string
taskIDs []string
ctxFunc func(string) (context.Context, context.CancelFunc)
wantCount int64
}{
{
desc: "Should decrease token count",
name: "task-5",
taskIDs: []string{uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: id,
Queue: "task-3",
}, time.Now().Add(time.Second))
},
},
{
desc: "Should decrease token count by 2",
name: "task-6",
taskIDs: []string{uuid.NewString(), uuid.NewString()},
ctxFunc: func(id string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: id,
Queue: "task-4",
}, time.Now().Add(time.Second))
},
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
}
var members []*redis.Z
for i := 0; i < len(tt.taskIDs); i++ {
members = append(members, &redis.Z{
Score: float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
Member: tt.taskIDs[i],
})
}
if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.ZAdd() got error %v", tt.desc, err)
}
sema := NewSemaphore(opt, tt.name, 3)
defer sema.Close()
for i := 0; i < len(tt.taskIDs); i++ {
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
if err := sema.Release(ctx); err != nil {
t.Errorf("%s;\nSemaphore.Release() got error %v", tt.desc, err)
}
cancel()
}
i, err := rc.ZCount(context.Background(), semaphoreKey(tt.name), "-inf", "+inf").Result()
if err != nil {
t.Errorf("%s;\nredis.UniversalClient.ZCount() got error %v", tt.desc, err)
}
if i != tt.wantCount {
t.Errorf("%s;\nSemaphore.Release(ctx) didn't release token, got %v want 0", tt.desc, i)
}
})
}
}
func TestNewSemaphore_Release_Error(t *testing.T) {
testID := uuid.NewString()
tests := []struct {
desc string
name string
taskIDs []string
ctxFunc func(string) (context.Context, context.CancelFunc)
errStr string
}{
{
desc: "Should return error when context is missing taskID",
name: "task-7",
taskIDs: []string{uuid.NewString()},
ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), time.Second)
},
errStr: "provided context is missing task ID value",
},
{
desc: "Should return error when context has taskID which never acquired token",
name: "task-8",
taskIDs: []string{uuid.NewString()},
ctxFunc: func(_ string) (context.Context, context.CancelFunc) {
return asynqcontext.New(&base.TaskMessage{
ID: testID,
Queue: "task-4",
}, time.Now().Add(time.Second))
},
errStr: fmt.Sprintf("no token found for task %q", testID),
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
opt := getRedisConnOpt(t)
rc := opt.MakeRedisClient().(redis.UniversalClient)
defer rc.Close()
if err := rc.Del(context.Background(), semaphoreKey(tt.name)).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.Del() got error %v", tt.desc, err)
}
var members []*redis.Z
for i := 0; i < len(tt.taskIDs); i++ {
members = append(members, &redis.Z{
Score: float64(time.Now().Add(time.Duration(i) * time.Second).Unix()),
Member: tt.taskIDs[i],
})
}
if err := rc.ZAdd(context.Background(), semaphoreKey(tt.name), members...).Err(); err != nil {
t.Errorf("%s;\nredis.UniversalClient.ZAdd() got error %v", tt.desc, err)
}
sema := NewSemaphore(opt, tt.name, 3)
defer sema.Close()
for i := 0; i < len(tt.taskIDs); i++ {
ctx, cancel := tt.ctxFunc(tt.taskIDs[i])
if err := sema.Release(ctx); err == nil || err.Error() != tt.errStr {
t.Errorf("%s;\nSemaphore.Release() got error %v want error %v", tt.desc, err, tt.errStr)
}
cancel()
}
})
}
}
func getRedisConnOpt(tb testing.TB) asynq.RedisConnOpt {
tb.Helper()
if useRedisCluster {
addrs := strings.Split(redisClusterAddrs, ",")
if len(addrs) == 0 {
tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.")
}
return asynq.RedisClusterClientOpt{
Addrs: addrs,
}
}
return asynq.RedisClientOpt{
Addr: redisAddr,
DB: redisDB,
}
}
type badConnOpt struct{}
func (b badConnOpt) MakeRedisClient() interface{} {
return nil
}
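These tests read their Redis connection settings from the flags registered in init: for example, go test ./x/rate/... -redis_addr=localhost:6379 -redis_db=14 runs them against a single node, while -redis_cluster -redis_cluster_addrs=localhost:7000,localhost:7001,localhost:7002 targets a cluster. Using a dedicated DB number (14 by default) keeps the keys the tests create and delete away from real data.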