Mirror of https://github.com/hibiken/asynq.git (synced 2025-10-20 21:26:14 +08:00)

Compare commits


53 Commits

Author SHA1 Message Date
Ken Hibino
a19909f5f4 v0.22.0 2022-02-19 06:20:05 -08:00
Ken Hibino
cea5110d15 Add IsOrphaned field to TaskInfo 2022-02-19 06:15:44 -08:00
Ken Hibino
9b63e23274 Update log messages 2022-02-19 06:15:44 -08:00
Ken Hibino
de25201d9f Make timeutil.SimulatedClock concurrency safe 2022-02-19 06:15:44 -08:00
Ken Hibino
ec560afb01 Fix processor test 2022-02-19 06:15:44 -08:00
Ken Hibino
d4006894ad Remove base.DeadlinesKey 2022-02-19 06:15:44 -08:00
Ken Hibino
59927509d8 Remove timeout and deadline fields under task key 2022-02-19 06:15:44 -08:00
Ken Hibino
8211167de2 Update processor to create a lease and watch for expiration 2022-02-19 06:15:44 -08:00
Ken Hibino
d7169cd445 Update heartbeat to extend lease of active workers 2022-02-19 06:15:44 -08:00
Ken Hibino
dfae8638e1 Update RDB methods to work with lease 2022-02-19 06:15:44 -08:00
Ken Hibino
b9943de2ab Add Lease type to base package 2022-02-19 06:15:44 -08:00
Ken Hibino
871474f220 Update heartbeat goroutine to call ExtendLease on active tasks 2022-02-19 06:15:44 -08:00
Ken Hibino
87dc392c7f Add RDB.ExtendLease method 2022-02-19 06:15:44 -08:00
Ken Hibino
dabcb120d5 Update recoverer to use ListLeaseExpired 2022-02-19 06:15:44 -08:00
Ken Hibino
bc2f1986d7 Update ListDeadlineExceeded to ListLeaseExpired 2022-02-19 06:15:44 -08:00
Ken Hibino
b8cb579407 Update RDB methods to use lease instead of deadlines set 2022-02-19 06:15:44 -08:00
Ken Hibino
bca624792c Move task deadline compute logic to processor 2022-02-19 06:15:44 -08:00
Ken Hibino
d865d89900 Update RDB.Dequeue to insert task ID to lease set 2022-02-19 06:15:44 -08:00
Ken Hibino
852af7abd1 Add base.LeaseKey helper function 2022-02-19 06:15:44 -08:00
Ken Hibino
5490d2c625 Fix tests 2022-02-16 07:08:01 -08:00
Binaek Sarkar
ebd7a32c0f conventions 2022-02-16 06:43:08 -08:00
Binaek Sarkar
55d0610a03 test and changelog 2022-02-16 06:43:08 -08:00
Binaek Sarkar
ab8a4f5b1e review corrections 2022-02-16 06:43:08 -08:00
Binaek Sarkar
d7ceb0c090 first cut 2022-02-16 06:43:08 -08:00
Ken Hibino
8bd70c6f84 (ci): Run go (build|test) commands for each module 2022-02-01 07:00:00 -08:00
Ken Hibino
10ab4e3745 Remove replace directives in go.mod 2022-02-01 06:18:41 -08:00
Ken Hibino
349f4c50fb Add example for ResultWriter 2022-01-31 09:08:41 -08:00
Ken Hibino
dff2e3a336 v0.21.0 2022-01-22 06:15:29 -08:00
Ken Hibino
65040af7b5 Update changelog 2022-01-22 06:14:24 -08:00
Ken Hibino
053fe2d1ee Create PeriodicTaskManager 2022-01-22 05:59:33 -08:00
Ken Hibino
25832e5e95 Fix bug related to concurrently executing server state changes 2022-01-12 09:10:56 -08:00
Ken Hibino
aa26f3819e Fix flaky tests 2022-01-05 09:07:42 -08:00
Ken Hibino
d94614bb9b Add CODE_OF_CONDUCT.md 2022-01-04 06:17:48 -08:00
Mahdi Dibaiee
ce46b07652 Allow configuration of DelayedTaskCheckInterval 2022-01-03 14:44:00 -08:00
Mahdi Dibaiee
2d0170541c Add --json flag for asynq stats command 2022-01-02 07:24:29 -08:00
Andreas Thomas
c1f08106da fix: missing import statement in example code 2021-12-27 05:40:10 -08:00
Ken Hibino
74cf804197 Update readme 2021-12-20 05:51:51 -08:00
Ken Hibino
8dfabfccb3 Fix build 2021-12-19 07:06:37 -08:00
Ken Hibino
5f20edcbd1 v0.20.0 2021-12-19 07:00:21 -08:00
Ken Hibino
1ddb2f7bce Use math.MaxInt64 instead of custom const 2021-12-19 06:58:12 -08:00
Ken Hibino
82d18e3d91 Record total tasks processed/failed 2021-12-16 16:53:02 -08:00
Ken Hibino
43cb4ddf19 Add queue metrics exporter
Changes:
- Added `x/metrics` package
- Added `tools/metrics_exporter` binary
2021-12-16 06:01:01 -08:00
Francisco Miamoto
ddfc6747a1 Fix typo in Server doc 2021-12-13 16:23:30 -08:00
Ken Hibino
970cb7a606 v0.19.1 2021-12-12 06:16:13 -08:00
Ken Hibino
157e97e72e Update changelog 2021-12-11 10:29:43 -08:00
Ken Hibino
22e6c9d297 Delete "pending_since" under task-key when state changes to active 2021-12-11 10:29:43 -08:00
Ken Hibino
99a6750656 Add Latency field to QueueInfo 2021-12-11 10:29:43 -08:00
Ken Hibino
e7c1c3ad6f Use clock in RDB 2021-12-11 10:29:43 -08:00
Ken Hibino
c9183374c5 Add internal timeutil package 2021-12-11 10:29:43 -08:00
Ken Hibino
6e7106c8f2 Record time when task moved to pending state 2021-12-11 10:29:43 -08:00
Ken Hibino
9f2c321e98 Add EnqueueContext method to Client 2021-11-15 16:34:26 -08:00
Ken Hibino
e2b61c9056 Return error if Unique TTL is less than 1s 2021-11-09 16:37:02 -08:00
Ken Hibino
531d1ef089 Fix godoc around errors returned from Inspector 2021-11-09 15:45:20 -08:00
45 changed files with 3748 additions and 1104 deletions


@@ -22,12 +22,21 @@ jobs:
       with:
         go-version: ${{ matrix.go-version }}
-      - name: Build
+      - name: Build core module
         run: go build -v ./...
-      - name: Test
+      - name: Build x module
+        run: cd x && go build -v ./... && cd ..
+      - name: Build tools module
+        run: cd tools && go build -v ./... && cd ..
+      - name: Test core module
         run: go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
+      - name: Test x module
+        run: cd x && go test -race -v ./... && cd ..
       - name: Benchmark Test
         run: go test -run=^$ -bench=. -loglevel=debug ./...

.gitignore

@@ -1,3 +1,4 @@
+vendor
 # Binaries for programs and plugins
 *.exe
 *.exe~
@@ -14,8 +15,9 @@
 # Ignore examples for now
 /examples
-# Ignore command binary
+# Ignore tool binaries
 /tools/asynq/asynq
+/tools/metrics_exporter/metrics_exporter
 # Ignore asynq config file
 .asynq.*

CHANGELOG.md

@@ -7,6 +7,44 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
 
+## [0.22.0] - 2022-02-19
+
+### Added
+- `BaseContext` is introduced in `Config` to specify a callback hook that provides a base `context` from which the `Handler` `context` is derived
+- `IsOrphaned` field is added to `TaskInfo` to describe a task left in active state with no worker processing it.
+
+### Changed
+- `Server` now recovers tasks with an expired lease. Recovered tasks are retried/archived with the `ErrLeaseExpired` error.
+
+## [0.21.0] - 2022-01-22
+
+### Added
+- `PeriodicTaskManager` is added. Prefer using this over `Scheduler` as it has better support for dynamic periodic tasks.
+- The `asynq stats` command now supports a `--json` option, making its output a JSON object
+- Introduced new configuration for `DelayedTaskCheckInterval`. See [godoc](https://godoc.org/github.com/hibiken/asynq) for more details.
+
+## [0.20.0] - 2021-12-19
+
+### Added
+- Package `x/metrics` is added.
+- Tool `tools/metrics_exporter` binary is added.
+- `ProcessedTotal` and `FailedTotal` fields were added to the `QueueInfo` struct.
+
+## [0.19.1] - 2021-12-12
+
+### Added
+- `Latency` field is added to `QueueInfo`.
+- `EnqueueContext` method is added to `Client`.
+
+### Fixed
+- Fixed an error when a user passes a duration less than 1s to the `Unique` option
+
 ## [0.19.0] - 2021-11-06
 
 ### Changed
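
The `PeriodicTaskManager` entry above is the headline of 0.21.0. A minimal usage sketch; the static provider, task type, Redis address, and sync interval are illustrative assumptions, not part of this diff:

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// staticProvider returns a fixed set of periodic task configs.
// A real provider would typically read these from a database or
// config file so that the schedule can change at runtime, which is
// the advantage PeriodicTaskManager has over Scheduler.
type staticProvider struct{}

func (p *staticProvider) GetConfigs() ([]*asynq.PeriodicTaskConfig, error) {
	return []*asynq.PeriodicTaskConfig{
		{Cronspec: "@every 30s", Task: asynq.NewTask("cleanup", nil)},
	}, nil
}

func main() {
	mgr, err := asynq.NewPeriodicTaskManager(asynq.PeriodicTaskManagerOpts{
		RedisConnOpt:               asynq.RedisClientOpt{Addr: "localhost:6379"},
		PeriodicTaskConfigProvider: &staticProvider{},
		SyncInterval:               10 * time.Second, // how often GetConfigs is re-read
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := mgr.Run(); err != nil { // blocks until shutdown signal
		log.Fatal(err)
	}
}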

CODE_OF_CONDUCT.md (new file)

@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
ken.hibino7@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

README.md

@@ -38,6 +38,7 @@ Task queues are used as a mechanism to distribute work across multiple machines.
 - [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks)
 - [Support Redis Cluster](https://github.com/hibiken/asynq/wiki/Redis-Cluster) for automatic sharding and high availability
 - [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for high availability
+- Integration with [Prometheus](https://prometheus.io/) to collect and visualize queue metrics
 - [Web UI](#web-ui) to inspect and remote-control queues and tasks
 - [CLI](#command-line-tool) to inspect and remote-control queues and tasks
@@ -65,8 +66,11 @@ Next, write a package that encapsulates task creation and task handling.
 package tasks
 
 import (
+    "context"
+    "encoding/json"
     "fmt"
+    "log"
+    "time"
 
     "github.com/hibiken/asynq"
 )
@@ -271,6 +275,9 @@ Here's a few screenshots of the Web UI:
 ![Web UI TasksView](https://user-images.githubusercontent.com/11155743/114697070-1f0a0300-9d26-11eb-855c-d3ec263865b7.png)
 
+**Metrics view**
+
+<img width="1532" alt="Screen Shot 2021-12-19 at 4 37 19 PM" src="https://user-images.githubusercontent.com/10953044/146777420-cae6c476-bac6-469c-acce-b2f6584e8707.png">
 
 **Settings and adaptive dark mode**
 ![Web UI Settings and adaptive dark mode](https://user-images.githubusercontent.com/11155743/114697149-3517c380-9d26-11eb-9f7a-ae2dd00aad5b.png)
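
The new Prometheus feature line corresponds to the `x/metrics` package and `tools/metrics_exporter` binary added in 0.20.0. A hedged sketch of the underlying idea, polling `Inspector` stats from a custom `prometheus/client_golang` collector; this is not the actual `x/metrics` API, and the metric name, port, and Redis address are assumptions:

package main

import (
	"log"
	"net/http"

	"github.com/hibiken/asynq"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// queueCollector pulls queue stats from asynq on every scrape.
type queueCollector struct {
	inspector *asynq.Inspector
	pending   *prometheus.Desc
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.pending }

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	queues, err := c.inspector.Queues()
	if err != nil {
		log.Printf("failed to list queues: %v", err)
		return
	}
	for _, qname := range queues {
		info, err := c.inspector.GetQueueInfo(qname)
		if err != nil {
			continue // skip queues we cannot read
		}
		ch <- prometheus.MustNewConstMetric(c.pending, prometheus.GaugeValue, float64(info.Pending), qname)
	}
}

func main() {
	c := &queueCollector{
		inspector: asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"}),
		pending:   prometheus.NewDesc("asynq_queue_pending_tasks", "Number of pending tasks", []string{"queue"}, nil),
	}
	prometheus.MustRegister(c)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9876", nil))
}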

asynq.go

@@ -101,6 +101,14 @@ type TaskInfo struct {
 	// zero if not applicable.
 	NextProcessAt time.Time
 
+	// IsOrphaned describes whether the task is left in active state with no worker processing it.
+	// An orphaned task indicates that the worker has crashed or experienced network failures and was not able to
+	// extend its lease on the task.
+	//
+	// This task will be recovered by running a server against the queue the task is in.
+	// This field is only applicable to tasks with TaskStateActive.
+	IsOrphaned bool
+
 	// Retention is duration of the retention period after the task is successfully processed.
 	Retention time.Duration
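
A short sketch of how a caller might surface the new field via `Inspector.ListActiveTasks`; the queue name and Redis address are assumptions for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})
	tasks, err := inspector.ListActiveTasks("default")
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tasks {
		if t.IsOrphaned {
			// The worker's lease expired; a running server against this
			// queue will recover (retry or archive) the task.
			fmt.Printf("orphaned: id=%s type=%s\n", t.ID, t.Type)
		}
	}
}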

client.go

@@ -5,6 +5,7 @@
 package asynq
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"time"
@@ -143,6 +144,7 @@ func (t deadlineOption) Value() interface{} { return time.Time(t) }
 // Task enqueued with this option is guaranteed to be unique within the given ttl.
 // Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued.
 // ErrDuplicateTask error is returned when enqueueing a duplicate task.
+// TTL duration must be greater than or equal to 1 second.
 //
 // Uniqueness of a task is based on the following properties:
 // - Task Type
@@ -246,7 +248,11 @@ func composeOptions(opts ...Option) (option, error) {
 		case deadlineOption:
 			res.deadline = time.Time(opt)
 		case uniqueOption:
-			res.uniqueTTL = time.Duration(opt)
+			ttl := time.Duration(opt)
+			if ttl < 1*time.Second {
+				return option{}, errors.New("Unique TTL cannot be less than 1s")
+			}
+			res.uniqueTTL = ttl
 		case processAtOption:
 			res.processAt = time.Time(opt)
 		case processInOption:
@@ -287,7 +293,7 @@ func (c *Client) Close() error {
 	return c.rdb.Close()
 }
 
-// Enqueue enqueues the given task to be processed asynchronously.
+// Enqueue enqueues the given task to a queue.
 //
 // Enqueue returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
 //
@@ -297,7 +303,25 @@ func (c *Client) Close() error {
 // By default, max retry is set to 25 and timeout is set to 30 minutes.
 //
 // If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
+//
+// Enqueue uses context.Background internally; to specify the context, use EnqueueContext.
 func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
+	return c.EnqueueContext(context.Background(), task, opts...)
+}
+
+// EnqueueContext enqueues the given task to a queue.
+//
+// EnqueueContext returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
+//
+// The argument opts specifies the behavior of task processing.
+// If there are conflicting Option values the last one overrides others.
+// Any options provided to NewTask can be overridden by options passed to Enqueue.
+// By default, max retry is set to 25 and timeout is set to 30 minutes.
+//
+// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
+//
+// The first argument context applies to the enqueue operation. To specify task timeout and deadline, use Timeout and Deadline option instead.
+func (c *Client) EnqueueContext(ctx context.Context, task *Task, opts ...Option) (*TaskInfo, error) {
 	if strings.TrimSpace(task.Type()) == "" {
 		return nil, fmt.Errorf("task typename cannot be empty")
 	}
@@ -338,10 +362,10 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
 	var state base.TaskState
 	if opt.processAt.Before(now) || opt.processAt.Equal(now) {
 		opt.processAt = now
-		err = c.enqueue(msg, opt.uniqueTTL)
+		err = c.enqueue(ctx, msg, opt.uniqueTTL)
 		state = base.TaskStatePending
 	} else {
-		err = c.schedule(msg, opt.processAt, opt.uniqueTTL)
+		err = c.schedule(ctx, msg, opt.processAt, opt.uniqueTTL)
 		state = base.TaskStateScheduled
 	}
 	switch {
@@ -355,17 +379,17 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
 	return newTaskInfo(msg, state, opt.processAt, nil), nil
 }
 
-func (c *Client) enqueue(msg *base.TaskMessage, uniqueTTL time.Duration) error {
+func (c *Client) enqueue(ctx context.Context, msg *base.TaskMessage, uniqueTTL time.Duration) error {
 	if uniqueTTL > 0 {
-		return c.rdb.EnqueueUnique(msg, uniqueTTL)
+		return c.rdb.EnqueueUnique(ctx, msg, uniqueTTL)
 	}
-	return c.rdb.Enqueue(msg)
+	return c.rdb.Enqueue(ctx, msg)
 }
 
-func (c *Client) schedule(msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
+func (c *Client) schedule(ctx context.Context, msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
 	if uniqueTTL > 0 {
 		ttl := t.Add(uniqueTTL).Sub(time.Now())
-		return c.rdb.ScheduleUnique(msg, t, ttl)
+		return c.rdb.ScheduleUnique(ctx, msg, t, ttl)
 	}
-	return c.rdb.Schedule(msg, t)
+	return c.rdb.Schedule(ctx, msg, t)
 }
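
Putting the two client-side changes together, `EnqueueContext` and the stricter `Unique` validation, a usage sketch; the task type, payload, and Redis address are illustrative assumptions:

package main

import (
	"context"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// The context bounds the enqueue operation itself, not task processing.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	task := asynq.NewTask("email:welcome", []byte(`{"user_id": 42}`))
	// Note: asynq.Unique(500*time.Millisecond) would now return an error,
	// since the uniqueness TTL must be at least one second.
	info, err := client.EnqueueContext(ctx, task, asynq.Unique(time.Hour))
	if err != nil {
		log.Fatalf("enqueue failed: %v", err)
	}
	log.Printf("enqueued task id=%s queue=%s", info.ID, info.Queue)
}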

client_test.go

@@ -734,6 +734,11 @@ func TestClientEnqueueError(t *testing.T) {
 			task: NewTask("foo", nil),
 			opts: []Option{TaskID(" ")},
 		},
+		{
+			desc: "With unique option less than 1s",
+			task: NewTask("foo", nil),
+			opts: []Option{Unique(300 * time.Millisecond)},
+		},
 	}
 
 	for _, tc := range tests {

example_test.go

@@ -5,6 +5,7 @@
 package asynq_test
 
 import (
+	"context"
 	"fmt"
 	"log"
 	"os"
@@ -113,3 +114,20 @@ func ExampleParseRedisURI() {
 	// localhost:6379
 	// 10
 }
+
+func ExampleResultWriter() {
+	// ResultWriter is only accessible in Handler.
+	h := func(ctx context.Context, task *asynq.Task) error {
+		// .. do task processing work
+		res := []byte("task result data")
+		n, err := task.ResultWriter().Write(res) // implements io.Writer
+		if err != nil {
+			return fmt.Errorf("failed to write task result: %v", err)
+		}
+		log.Printf(" %d bytes written", n)
+		return nil
+	}
+	_ = h
+}

forwarder.go

@@ -70,6 +70,6 @@ func (f *forwarder) start(wg *sync.WaitGroup) {
 func (f *forwarder) exec() {
 	if err := f.broker.ForwardIfReady(f.queues...); err != nil {
-		f.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
+		f.logger.Errorf("Failed to forward scheduled tasks: %v", err)
 	}
 }

go.mod

@@ -1,6 +1,6 @@
 module github.com/hibiken/asynq
 
-go 1.13
+go 1.14
 
 require (
 	github.com/go-redis/redis/v8 v8.11.2

heartbeat.go

@@ -12,6 +12,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/log"
+	"github.com/hibiken/asynq/internal/timeutil"
 )
 
 // heartbeater is responsible for writing process info to redis periodically to
@@ -19,6 +20,7 @@ import (
 type heartbeater struct {
 	logger *log.Logger
 	broker base.Broker
+	clock  timeutil.Clock
 
 	// channel to communicate back to the long running "heartbeater" goroutine.
 	done chan struct{}
@@ -41,7 +43,7 @@ type heartbeater struct {
 	workers map[string]*workerInfo
 
 	// state is shared with other goroutine but is concurrency safe.
-	state *base.ServerState
+	state *serverState
 
 	// channels to receive updates on active workers.
 	starting <-chan *workerInfo
@@ -55,7 +57,7 @@ type heartbeaterParams struct {
 	concurrency    int
 	queues         map[string]int
 	strictPriority bool
-	state          *base.ServerState
+	state          *serverState
 	starting       <-chan *workerInfo
 	finished       <-chan *base.TaskMessage
 }
@@ -69,6 +71,7 @@ func newHeartbeater(params heartbeaterParams) *heartbeater {
 	return &heartbeater{
 		logger: params.logger,
 		broker: params.broker,
+		clock:  timeutil.NewRealClock(),
 
 		done:     make(chan struct{}),
 		interval: params.interval,
@@ -100,6 +103,8 @@ type workerInfo struct {
 	started time.Time
 	// deadline the worker has to finish processing the task by.
 	deadline time.Time
+	// lease the worker holds for the task.
+	lease *base.Lease
 }
 
 func (h *heartbeater) start(wg *sync.WaitGroup) {
@@ -107,7 +112,7 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
 	go func() {
 		defer wg.Done()
 
-		h.started = time.Now()
+		h.started = h.clock.Now()
 
 		h.beat()
@@ -134,7 +139,12 @@ func (h *heartbeater) start(wg *sync.WaitGroup) {
 	}()
 }
 
+// beat extends lease for workers and writes server/worker info to redis.
 func (h *heartbeater) beat() {
+	h.state.mu.Lock()
+	srvStatus := h.state.value.String()
+	h.state.mu.Unlock()
+
 	info := base.ServerInfo{
 		Host: h.host,
 		PID:  h.pid,
@@ -142,12 +152,13 @@ func (h *heartbeater) beat() {
 		Concurrency:       h.concurrency,
 		Queues:            h.queues,
 		StrictPriority:    h.strictPriority,
-		Status:            h.state.String(),
+		Status:            srvStatus,
 		Started:           h.started,
 		ActiveWorkerCount: len(h.workers),
 	}
 
 	var ws []*base.WorkerInfo
+	idsByQueue := make(map[string][]string)
 	for id, w := range h.workers {
 		ws = append(ws, &base.WorkerInfo{
 			Host: h.host,
@@ -160,11 +171,30 @@ func (h *heartbeater) beat() {
 			Started:  w.started,
 			Deadline: w.deadline,
 		})
+		// Check lease before adding to the set to make sure not to extend the lease if the lease is already expired.
+		if w.lease.IsValid() {
+			idsByQueue[w.msg.Queue] = append(idsByQueue[w.msg.Queue], id)
+		} else {
+			w.lease.NotifyExpiration() // notify processor if the lease is expired
+		}
 	}
 
 	// Note: Set TTL to be long enough so that it won't expire before we write again
 	// and short enough to expire quickly once the process is shut down or killed.
 	if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil {
-		h.logger.Errorf("could not write server state data: %v", err)
+		h.logger.Errorf("Failed to write server state data: %v", err)
+	}
+
+	for qname, ids := range idsByQueue {
+		expirationTime, err := h.broker.ExtendLease(qname, ids...)
+		if err != nil {
+			h.logger.Errorf("Failed to extend lease for tasks %v: %v", ids, err)
+			continue
+		}
+		for _, id := range ids {
+			if l := h.workers[id].lease; !l.Reset(expirationTime) {
+				h.logger.Warnf("Lease reset failed for %s; lease deadline: %v", id, l.Deadline())
+			}
+		}
 	}
 }
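
For readers unfamiliar with the lease mechanism, here is a simplified, self-contained sketch of the pattern `beat` relies on: a worker holds a lease with a deadline, the heartbeater periodically extends valid leases, and an expired lease notifies the processor. This is illustrative only, not the actual `internal/base.Lease` implementation:

package main

import (
	"fmt"
	"sync"
	"time"
)

// Lease is a deadline that must be periodically extended while a task
// is being processed; waiters learn about expiration via a channel.
type Lease struct {
	mu       sync.Mutex
	deadline time.Time
	expired  chan struct{}
	once     sync.Once
}

func NewLease(deadline time.Time) *Lease {
	return &Lease{deadline: deadline, expired: make(chan struct{})}
}

func (l *Lease) IsValid() bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	return time.Now().Before(l.deadline)
}

// Reset extends the lease; it reports false if the lease already expired.
func (l *Lease) Reset(deadline time.Time) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if !time.Now().Before(l.deadline) {
		return false
	}
	l.deadline = deadline
	return true
}

// NotifyExpiration signals waiters exactly once that the lease is gone.
func (l *Lease) NotifyExpiration() { l.once.Do(func() { close(l.expired) }) }

func (l *Lease) Done() <-chan struct{} { return l.expired }

func main() {
	lease := NewLease(time.Now().Add(100 * time.Millisecond))
	go func() {
		<-lease.Done()
		fmt.Println("lease expired; abort task processing")
	}()
	time.Sleep(150 * time.Millisecond)
	if !lease.IsValid() {
		lease.NotifyExpiration() // what beat() does for expired leases
	}
	time.Sleep(10 * time.Millisecond) // let the waiter print
}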

heartbeat_test.go

@@ -5,6 +5,7 @@
 package asynq
 
 import (
+	"context"
 	"sync"
 	"testing"
 	"time"
@@ -15,21 +16,143 @@ import (
 	h "github.com/hibiken/asynq/internal/asynqtest"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/rdb"
 	"github.com/hibiken/asynq/internal/testbroker"
+	"github.com/hibiken/asynq/internal/timeutil"
 )
 
+// Test goes through a few phases.
+//
+// Phase1: Simulate Server startup; simulate starting tasks listed in startedWorkers
+// Phase2: Simulate finishing tasks listed in finishedTasks
+// Phase3: Simulate Server shutdown
 func TestHeartbeater(t *testing.T) {
 	r := setup(t)
 	defer r.Close()
 	rdbClient := rdb.NewRDB(r)
 
+	now := time.Now()
+	const elapsedTime = 10 * time.Second // simulated time elapsed between phase1 and phase2
+	clock := timeutil.NewSimulatedClock(time.Time{}) // time will be set in each test
+
+	t1 := h.NewTaskMessageWithQueue("task1", nil, "default")
+	t2 := h.NewTaskMessageWithQueue("task2", nil, "default")
+	t3 := h.NewTaskMessageWithQueue("task3", nil, "default")
+	t4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
+	t5 := h.NewTaskMessageWithQueue("task5", nil, "custom")
+	t6 := h.NewTaskMessageWithQueue("task6", nil, "default")
+
+	// Note: intentionally set to time less than now.Add(rdb.LeaseDuration) to test lease extension is working.
+	lease1 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+	lease2 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+	lease3 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+	lease4 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+	lease5 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+	lease6 := h.NewLeaseWithClock(now.Add(10*time.Second), clock)
+
 	tests := []struct {
+		desc string
+		// Interval between heartbeats.
 		interval time.Duration
+		// Server info.
 		host           string
 		pid            int
 		queues         map[string]int
 		concurrency    int
+		active         map[string][]*base.TaskMessage // initial active set state
+		lease          map[string][]base.Z            // initial lease set state
+		wantLease1     map[string][]base.Z            // expected lease set state after starting all startedWorkers
+		wantLease2     map[string][]base.Z            // expected lease set state after finishing all finishedTasks
+		startedWorkers []*workerInfo                  // workerInfo to send via the started channel
+		finishedTasks  []*base.TaskMessage            // tasks to send via the finished channel
+		startTime      time.Time                      // simulated start time
+		elapsedTime    time.Duration                  // simulated time elapsed between starting and finishing processing tasks
 	}{
-		{2 * time.Second, "localhost", 45678, map[string]int{"default": 1}, 10},
+		{
+			desc:        "With single queue",
+			interval:    2 * time.Second,
+			host:        "localhost",
+			pid:         45678,
+			queues:      map[string]int{"default": 1},
+			concurrency: 10,
+			active: map[string][]*base.TaskMessage{
+				"default": {t1, t2, t3},
+			},
+			lease: map[string][]base.Z{
+				"default": {
+					{Message: t1, Score: now.Add(10 * time.Second).Unix()},
+					{Message: t2, Score: now.Add(10 * time.Second).Unix()},
+					{Message: t3, Score: now.Add(10 * time.Second).Unix()},
+				},
+			},
+			startedWorkers: []*workerInfo{
+				{msg: t1, started: now, deadline: now.Add(2 * time.Minute), lease: lease1},
+				{msg: t2, started: now, deadline: now.Add(2 * time.Minute), lease: lease2},
+				{msg: t3, started: now, deadline: now.Add(2 * time.Minute), lease: lease3},
+			},
+			finishedTasks: []*base.TaskMessage{t1, t2},
+			wantLease1: map[string][]base.Z{
+				"default": {
+					{Message: t1, Score: now.Add(rdb.LeaseDuration).Unix()},
+					{Message: t2, Score: now.Add(rdb.LeaseDuration).Unix()},
+					{Message: t3, Score: now.Add(rdb.LeaseDuration).Unix()},
+				},
+			},
+			wantLease2: map[string][]base.Z{
+				"default": {
+					{Message: t3, Score: now.Add(elapsedTime).Add(rdb.LeaseDuration).Unix()},
+				},
+			},
+			startTime:   now,
+			elapsedTime: elapsedTime,
+		},
+		{
+			desc:        "With multiple queues",
+			interval:    2 * time.Second,
+			host:        "localhost",
+			pid:         45678,
+			queues:      map[string]int{"default": 1, "custom": 2},
+			concurrency: 10,
+			active: map[string][]*base.TaskMessage{
+				"default": {t6},
+				"custom":  {t4, t5},
+			},
+			lease: map[string][]base.Z{
+				"default": {
+					{Message: t6, Score: now.Add(10 * time.Second).Unix()},
+				},
+				"custom": {
+					{Message: t4, Score: now.Add(10 * time.Second).Unix()},
+					{Message: t5, Score: now.Add(10 * time.Second).Unix()},
+				},
+			},
+			startedWorkers: []*workerInfo{
+				{msg: t6, started: now, deadline: now.Add(2 * time.Minute), lease: lease6},
+				{msg: t4, started: now, deadline: now.Add(2 * time.Minute), lease: lease4},
+				{msg: t5, started: now, deadline: now.Add(2 * time.Minute), lease: lease5},
+			},
+			finishedTasks: []*base.TaskMessage{t6, t5},
+			wantLease1: map[string][]base.Z{
+				"default": {
+					{Message: t6, Score: now.Add(rdb.LeaseDuration).Unix()},
+				},
+				"custom": {
+					{Message: t4, Score: now.Add(rdb.LeaseDuration).Unix()},
+					{Message: t5, Score: now.Add(rdb.LeaseDuration).Unix()},
+				},
+			},
+			wantLease2: map[string][]base.Z{
+				"default": {},
+				"custom": {
+					{Message: t4, Score: now.Add(elapsedTime).Add(rdb.LeaseDuration).Unix()},
+				},
+			},
+			startTime:   now,
+			elapsedTime: elapsedTime,
+		},
 	}
 
 	timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond)
@@ -37,8 +160,15 @@ func TestHeartbeater(t *testing.T) {
 	ignoreFieldOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID")
 	for _, tc := range tests {
 		h.FlushDB(t, r)
+		h.SeedAllActiveQueues(t, r, tc.active)
+		h.SeedAllLease(t, r, tc.lease)
 
-		state := base.NewServerState()
+		clock.SetTime(tc.startTime)
+		rdbClient.SetClock(clock)
+
+		srvState := &serverState{}
+		startingCh := make(chan *workerInfo)
+		finishedCh := make(chan *base.TaskMessage)
 		hb := newHeartbeater(heartbeaterParams{
 			logger: testLogger,
 			broker: rdbClient,
@@ -46,72 +176,134 @@ func TestHeartbeater(t *testing.T) {
 			concurrency:    tc.concurrency,
 			queues:         tc.queues,
 			strictPriority: false,
-			state:          state,
-			starting:       make(chan *workerInfo),
-			finished:       make(chan *base.TaskMessage),
+			state:          srvState,
+			starting:       startingCh,
+			finished:       finishedCh,
 		})
+		hb.clock = clock
 
 		// Change host and pid fields for testing purpose.
 		hb.host = tc.host
 		hb.pid = tc.pid
 
-		state.Set(base.StateActive)
+		//===================
+		// Start Phase1
+		//===================
+
+		srvState.mu.Lock()
+		srvState.value = srvStateActive // simulating Server.Start
+		srvState.mu.Unlock()
 
 		var wg sync.WaitGroup
 		hb.start(&wg)
 
-		want := &base.ServerInfo{
-			Host:        tc.host,
-			PID:         tc.pid,
-			Queues:      tc.queues,
-			Concurrency: tc.concurrency,
-			Started:     time.Now(),
-			Status:      "active",
+		// Simulate processor starting to work on tasks.
+		for _, w := range tc.startedWorkers {
+			startingCh <- w
 		}
 
-		// allow for heartbeater to write to redis
-		time.Sleep(tc.interval)
+		// Wait for heartbeater to write to redis
+		time.Sleep(tc.interval * 2)
 
 		ss, err := rdbClient.ListServers()
 		if err != nil {
-			t.Errorf("could not read server info from redis: %v", err)
+			t.Errorf("%s: could not read server info from redis: %v", tc.desc, err)
 			hb.shutdown()
 			continue
 		}
-
 		if len(ss) != 1 {
-			t.Errorf("(*RDB).ListServers returned %d process info, want 1", len(ss))
+			t.Errorf("%s: (*RDB).ListServers returned %d server info, want 1", tc.desc, len(ss))
 			hb.shutdown()
 			continue
 		}
 
-		if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
-			t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
+		wantInfo := &base.ServerInfo{
+			Host:              tc.host,
+			PID:               tc.pid,
+			Queues:            tc.queues,
+			Concurrency:       tc.concurrency,
+			Started:           now,
+			Status:            "active",
+			ActiveWorkerCount: len(tc.startedWorkers),
+		}
+		if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
+			t.Errorf("%s: redis stored server status %+v, want %+v; (-want, +got)\n%s", tc.desc, ss[0], wantInfo, diff)
 			hb.shutdown()
 			continue
 		}
 
-		// status change
-		state.Set(base.StateClosed)
+		for qname, wantLease := range tc.wantLease1 {
+			gotLease := h.GetLeaseEntries(t, r, qname)
+			if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
+				t.Errorf("%s: mismatch found in %q: (-want,+got):\n%s", tc.desc, base.LeaseKey(qname), diff)
+			}
+		}
+		for _, w := range tc.startedWorkers {
+			if want := now.Add(rdb.LeaseDuration); w.lease.Deadline() != want {
+				t.Errorf("%s: lease deadline for %v is set to %v, want %v", tc.desc, w.msg, w.lease.Deadline(), want)
+			}
+		}
 
-		// allow for heartbeater to write to redis
+		//===================
+		// Start Phase2
+		//===================
+
+		clock.AdvanceTime(tc.elapsedTime)
+
+		// Simulate processor finished processing tasks.
+		for _, msg := range tc.finishedTasks {
+			if err := rdbClient.Done(context.Background(), msg); err != nil {
+				t.Fatalf("RDB.Done failed: %v", err)
+			}
+			finishedCh <- msg
+		}
+
+		// Wait for heartbeater to write to redis
 		time.Sleep(tc.interval * 2)
 
-		want.Status = "closed"
+		for qname, wantLease := range tc.wantLease2 {
+			gotLease := h.GetLeaseEntries(t, r, qname)
+			if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
+				t.Errorf("%s: mismatch found in %q: (-want,+got):\n%s", tc.desc, base.LeaseKey(qname), diff)
+			}
+		}
+
+		//===================
+		// Start Phase3
+		//===================
+
+		// Server state change; simulating Server.Shutdown
+		srvState.mu.Lock()
+		srvState.value = srvStateClosed
+		srvState.mu.Unlock()
+
+		// Wait for heartbeater to write to redis
+		time.Sleep(tc.interval * 2)
+
+		wantInfo = &base.ServerInfo{
+			Host:              tc.host,
+			PID:               tc.pid,
+			Queues:            tc.queues,
+			Concurrency:       tc.concurrency,
+			Started:           now,
+			Status:            "closed",
+			ActiveWorkerCount: len(tc.startedWorkers) - len(tc.finishedTasks),
+		}
 		ss, err = rdbClient.ListServers()
 		if err != nil {
-			t.Errorf("could not read process status from redis: %v", err)
+			t.Errorf("%s: could not read server status from redis: %v", tc.desc, err)
 			hb.shutdown()
 			continue
 		}
 
 		if len(ss) != 1 {
-			t.Errorf("(*RDB).ListProcesses returned %d process info, want 1", len(ss))
+			t.Errorf("%s: (*RDB).ListServers returned %d server info, want 1", tc.desc, len(ss))
 			hb.shutdown()
 			continue
 		}
 
-		if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
-			t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
+		if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
+			t.Errorf("%s: redis stored process status %+v, want %+v; (-want, +got)\n%s", tc.desc, ss[0], wantInfo, diff)
 			hb.shutdown()
 			continue
 		}
@@ -131,8 +323,7 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
 	r := rdb.NewRDB(setup(t))
 	defer r.Close()
 	testBroker := testbroker.NewTestBroker(r)
-	state := base.NewServerState()
-	state.Set(base.StateActive)
+	state := &serverState{value: srvStateActive}
 	hb := newHeartbeater(heartbeaterParams{
 		logger: testLogger,
 		broker: testBroker,

inspector.go

@@ -52,6 +52,9 @@ type QueueInfo struct {
 	// It is an approximate memory usage value in bytes since the value is computed by sampling.
 	MemoryUsage int64
 
+	// Latency of the queue, measured by the oldest pending task in the queue.
+	Latency time.Duration
+
 	// Size is the total number of tasks in the queue.
 	// The value is the sum of Pending, Active, Scheduled, Retry, and Archived.
 	Size int
@@ -69,12 +72,17 @@ type QueueInfo struct {
 	// Number of stored completed tasks.
 	Completed int
 
-	// Total number of tasks being processed during the given date.
+	// Total number of tasks being processed within the given date (counter resets daily).
 	// The number includes both succeeded and failed tasks.
 	Processed int
-	// Total number of tasks failed to be processed during the given date.
+	// Total number of tasks failed to be processed within the given date (counter resets daily).
 	Failed int
 
+	// Total number of tasks processed (cumulative).
+	ProcessedTotal int
+	// Total number of tasks failed (cumulative).
+	FailedTotal int
+
 	// Paused indicates whether the queue is paused.
 	// If true, tasks in the queue will not be processed.
 	Paused bool
@@ -95,6 +103,7 @@ func (i *Inspector) GetQueueInfo(qname string) (*QueueInfo, error) {
 	return &QueueInfo{
 		Queue:       stats.Queue,
 		MemoryUsage: stats.MemoryUsage,
+		Latency:     stats.Latency,
 		Size:        stats.Size,
 		Pending:     stats.Pending,
 		Active:      stats.Active,
@@ -104,6 +113,8 @@ func (i *Inspector) GetQueueInfo(qname string) (*QueueInfo, error) {
 		Completed:   stats.Completed,
 		Processed:   stats.Processed,
 		Failed:      stats.Failed,
+		ProcessedTotal: stats.ProcessedTotal,
+		FailedTotal:    stats.FailedTotal,
 		Paused:      stats.Paused,
 		Timestamp:   stats.Timestamp,
 	}, nil
@@ -177,8 +188,8 @@ func (i *Inspector) DeleteQueue(qname string, force bool) error {
 // GetTaskInfo retrieves task information given a task id and queue name.
 //
-// Returns ErrQueueNotFound if a queue with the given name doesn't exist.
-// Returns ErrTaskNotFound if a task with the given id doesn't exist in the queue.
+// Returns an error wrapping ErrQueueNotFound if a queue with the given name doesn't exist.
+// Returns an error wrapping ErrTaskNotFound if a task with the given id doesn't exist in the queue.
 func (i *Inspector) GetTaskInfo(qname, id string) (*TaskInfo, error) {
 	info, err := i.rdb.GetTaskInfo(qname, id)
 	switch {
@@ -297,16 +308,28 @@ func (i *Inspector) ListActiveTasks(qname string, opts ...ListOption) ([]*TaskIn
 	case err != nil:
 		return nil, fmt.Errorf("asynq: %v", err)
 	}
+	expired, err := i.rdb.ListLeaseExpired(time.Now(), qname)
+	if err != nil {
+		return nil, fmt.Errorf("asynq: %v", err)
+	}
+	expiredSet := make(map[string]struct{}) // set of expired message IDs
+	for _, msg := range expired {
+		expiredSet[msg.ID] = struct{}{}
+	}
 	var tasks []*TaskInfo
 	for _, i := range infos {
-		tasks = append(tasks, newTaskInfo(
+		t := newTaskInfo(
 			i.Message,
 			i.State,
 			i.NextProcessAt,
 			i.Result,
-		))
+		)
+		if _, ok := expiredSet[i.Message.ID]; ok {
+			t.IsOrphaned = true
+		}
+		tasks = append(tasks, t)
 	}
-	return tasks, err
+	return tasks, nil
 }
 
 // ListScheduledTasks retrieves scheduled tasks from the specified queue.
@@ -479,8 +502,8 @@ func (i *Inspector) DeleteAllCompletedTasks(qname string) (int, error) {
 // The task needs to be in pending, scheduled, retry, or archived state,
 // otherwise DeleteTask will return an error.
 //
-// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
-// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
+// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
+// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
 // If the task is in active state, it returns a non-nil error.
 func (i *Inspector) DeleteTask(qname, id string) error {
 	if err := base.ValidateQueueName(qname); err != nil {
@@ -533,8 +556,8 @@ func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
 // The task needs to be in scheduled, retry, or archived state, otherwise RunTask
 // will return an error.
 //
-// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
-// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
+// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
+// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
 // If the task is in pending or active state, it returns a non-nil error.
 func (i *Inspector) RunTask(qname, id string) error {
 	if err := base.ValidateQueueName(qname); err != nil {
@@ -586,8 +609,8 @@ func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
 // The task needs to be in pending, scheduled, or retry state, otherwise ArchiveTask
 // will return an error.
 //
-// If a queue with the given name doesn't exist, it returns ErrQueueNotFound.
-// If a task with the given id doesn't exist in the queue, it returns ErrTaskNotFound.
+// If a queue with the given name doesn't exist, it returns an error wrapping ErrQueueNotFound.
+// If a task with the given id doesn't exist in the queue, it returns an error wrapping ErrTaskNotFound.
 // If the task is already archived, it returns a non-nil error.
 func (i *Inspector) ArchiveTask(qname, id string) error {
 	if err := base.ValidateQueueName(qname); err != nil {
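
A sketch of reading the new `QueueInfo` fields and matching the now-wrapped sentinel errors with `errors.Is`; the Redis address and task id are assumptions for illustration:

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})

	info, err := inspector.GetQueueInfo("default")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("latency=%v processed_today=%d processed_total=%d failed_total=%d\n",
		info.Latency, info.Processed, info.ProcessedTotal, info.FailedTotal)

	// Because the errors are wrapped, compare with errors.Is rather than ==.
	if _, err := inspector.GetTaskInfo("default", "no-such-id"); errors.Is(err, asynq.ErrTaskNotFound) {
		fmt.Println("task not found")
	}
}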

inspector_test.go

@@ -8,7 +8,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"math"
 	"sort"
 	"testing"
 	"time"
@@ -19,6 +18,7 @@ import (
 	h "github.com/hibiken/asynq/internal/asynqtest"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/rdb"
+	"github.com/hibiken/asynq/internal/timeutil"
 )
 
 func TestInspectorQueues(t *testing.T) {
@@ -269,6 +269,7 @@ func TestInspectorGetQueueInfo(t *testing.T) {
 	ignoreMemUsg := cmpopts.IgnoreFields(QueueInfo{}, "MemoryUsage")
 
 	inspector := NewInspector(getRedisConnOpt(t))
+	inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
 
 	tests := []struct {
 		pending   map[string][]*base.TaskMessage
@@ -279,6 +280,9 @@ func TestInspectorGetQueueInfo(t *testing.T) {
 		completed map[string][]base.Z
 		processed map[string]int
 		failed    map[string]int
+		processedTotal map[string]int
+		failedTotal    map[string]int
+		oldestPendingMessageEnqueueTime map[string]time.Time
 		qname     string
 		want      *QueueInfo
 	}{
@@ -326,9 +330,25 @@ func TestInspectorGetQueueInfo(t *testing.T) {
 				"critical": 0,
 				"low":      5,
 			},
+			processedTotal: map[string]int{
+				"default":  11111,
+				"critical": 22222,
+				"low":      33333,
+			},
+			failedTotal: map[string]int{
+				"default":  111,
+				"critical": 222,
+				"low":      333,
+			},
+			oldestPendingMessageEnqueueTime: map[string]time.Time{
+				"default":  now.Add(-15 * time.Second),
+				"critical": now.Add(-200 * time.Millisecond),
+				"low":      now.Add(-30 * time.Second),
+			},
 			qname: "default",
 			want: &QueueInfo{
 				Queue:     "default",
+				Latency:   15 * time.Second,
 				Size:      4,
 				Pending:   1,
 				Active:    1,
@@ -338,6 +358,8 @@ func TestInspectorGetQueueInfo(t *testing.T) {
 				Completed: 0,
 				Processed: 120,
 				Failed:    2,
+				ProcessedTotal: 11111,
+				FailedTotal:    111,
 				Paused:    false,
 				Timestamp: now,
 			},
@@ -352,13 +374,25 @@ func TestInspectorGetQueueInfo(t *testing.T) {
 		h.SeedAllRetryQueues(t, r, tc.retry)
 		h.SeedAllArchivedQueues(t, r, tc.archived)
 		h.SeedAllCompletedQueues(t, r, tc.completed)
+		ctx := context.Background()
 		for qname, n := range tc.processed {
-			processedKey := base.ProcessedKey(qname, now)
-			r.Set(context.Background(), processedKey, n, 0)
+			r.Set(ctx, base.ProcessedKey(qname, now), n, 0)
 		}
 		for qname, n := range tc.failed {
-			failedKey := base.FailedKey(qname, now)
-			r.Set(context.Background(), failedKey, n, 0)
+			r.Set(ctx, base.FailedKey(qname, now), n, 0)
+		}
+		for qname, n := range tc.processedTotal {
+			r.Set(ctx, base.ProcessedTotalKey(qname), n, 0)
+		}
+		for qname, n := range tc.failedTotal {
+			r.Set(ctx, base.FailedTotalKey(qname), n, 0)
+		}
+		for qname, enqueueTime := range tc.oldestPendingMessageEnqueueTime {
+			if enqueueTime.IsZero() {
+				continue
+			}
+			oldestPendingMessageID := r.LRange(ctx, base.PendingKey(qname), -1, -1).Val()[0] // get the right most msg in the list
+			r.HSet(ctx, base.TaskKey(qname, oldestPendingMessageID), "pending_since", enqueueTime.UnixNano())
 		}
 
 		got, err := inspector.GetQueueInfo(tc.qname)
@@ -711,6 +745,12 @@ func TestInspectorListPendingTasks(t *testing.T) {
 	}
 }
 
+func newOrphanedTaskInfo(msg *base.TaskMessage) *TaskInfo {
+	info := newTaskInfo(msg, base.TaskStateActive, time.Time{}, nil)
+	info.IsOrphaned = true
+	return info
+}
+
 func TestInspectorListActiveTasks(t *testing.T) {
 	r := setup(t)
 	defer r.Close()
@@ -720,10 +760,12 @@ func TestInspectorListActiveTasks(t *testing.T) {
 	m4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
 
 	inspector := NewInspector(getRedisConnOpt(t))
+	now := time.Now()
 
 	tests := []struct {
 		desc   string
 		active map[string][]*base.TaskMessage
+		lease  map[string][]base.Z
 		qname  string
 		want   []*TaskInfo
 	}{
@@ -733,10 +775,42 @@ func TestInspectorListActiveTasks(t *testing.T) {
 				"default": {m1, m2},
 				"custom":  {m3, m4},
 			},
+			lease: map[string][]base.Z{
+				"default": {
+					{Message: m1, Score: now.Add(20 * time.Second).Unix()},
+					{Message: m2, Score: now.Add(20 * time.Second).Unix()},
+				},
+				"custom": {
+					{Message: m3, Score: now.Add(20 * time.Second).Unix()},
+					{Message: m4, Score: now.Add(20 * time.Second).Unix()},
+				},
+			},
+			qname: "custom",
+			want: []*TaskInfo{
+				newTaskInfo(m3, base.TaskStateActive, time.Time{}, nil),
+				newTaskInfo(m4, base.TaskStateActive, time.Time{}, nil),
+			},
+		},
+		{
+			desc: "with an orphaned task",
+			active: map[string][]*base.TaskMessage{
+				"default": {m1, m2},
+				"custom":  {m3, m4},
+			},
+			lease: map[string][]base.Z{
+				"default": {
+					{Message: m1, Score: now.Add(20 * time.Second).Unix()},
+					{Message: m2, Score: now.Add(-10 * time.Second).Unix()}, // orphaned task
+				},
+				"custom": {
+					{Message: m3, Score: now.Add(20 * time.Second).Unix()},
+					{Message: m4, Score: now.Add(20 * time.Second).Unix()},
+				},
+			},
 			qname: "default",
 			want: []*TaskInfo{
 				newTaskInfo(m1, base.TaskStateActive, time.Time{}, nil),
-				newTaskInfo(m2, base.TaskStateActive, time.Time{}, nil),
+				newOrphanedTaskInfo(m2),
 			},
 		},
 	}
@@ -744,6 +818,7 @@ func TestInspectorListActiveTasks(t *testing.T) {
 	for _, tc := range tests {
 		h.FlushDB(t, r)
 		h.SeedAllActiveQueues(t, r, tc.active)
+		h.SeedAllLease(t, r, tc.lease)
 
 		got, err := inspector.ListActiveTasks(tc.qname)
 		if err != nil {
@@ -1488,6 +1563,7 @@ func TestInspectorArchiveAllPendingTasks(t *testing.T) {
 	z1 := base.Z{Message: m1, Score: now.Add(5 * time.Minute).Unix()}
 	z2 := base.Z{Message: m2, Score: now.Add(15 * time.Minute).Unix()}
 	inspector := NewInspector(getRedisConnOpt(t))
+	inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
 
 	tests := []struct {
 		pending map[string][]*base.TaskMessage
@@ -1579,12 +1655,8 @@ func TestInspectorArchiveAllPendingTasks(t *testing.T) {
 			}
 		}
 		for qname, want := range tc.wantArchived {
-			// Allow Z.Score to differ by up to 2.
-			approxOpt := cmp.Comparer(func(a, b int64) bool {
-				return math.Abs(float64(a-b)) < 2
-			})
 			gotArchived := h.GetArchivedEntries(t, r, qname)
-			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt, approxOpt); diff != "" {
+			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt); diff != "" {
 				t.Errorf("unexpected archived tasks in queue %q: (-want, +got)\n%s", qname, diff)
 			}
 		}
@@ -1605,6 +1677,7 @@ func TestInspectorArchiveAllScheduledTasks(t *testing.T) {
 	z4 := base.Z{Message: m4, Score: now.Add(2 * time.Minute).Unix()}
 
 	inspector := NewInspector(getRedisConnOpt(t))
+	inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
 
 	tests := []struct {
 		scheduled map[string][]base.Z
@@ -1712,12 +1785,8 @@ func TestInspectorArchiveAllScheduledTasks(t *testing.T) {
 			}
 		}
 		for qname, want := range tc.wantArchived {
-			// Allow Z.Score to differ by up to 2.
-			approxOpt := cmp.Comparer(func(a, b int64) bool {
-				return math.Abs(float64(a-b)) < 2
-			})
 			gotArchived := h.GetArchivedEntries(t, r, qname)
-			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt, approxOpt); diff != "" {
+			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt); diff != "" {
 				t.Errorf("unexpected archived tasks in queue %q: (-want, +got)\n%s", qname, diff)
 			}
 		}
@@ -1738,6 +1807,7 @@ func TestInspectorArchiveAllRetryTasks(t *testing.T) {
 	z4 := base.Z{Message: m4, Score: now.Add(2 * time.Minute).Unix()}
 
 	inspector := NewInspector(getRedisConnOpt(t))
+	inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
 
 	tests := []struct {
 		retry map[string][]base.Z
@@ -1828,10 +1898,9 @@ func TestInspectorArchiveAllRetryTasks(t *testing.T) {
 				t.Errorf("unexpected retry tasks in queue %q: (-want, +got)\n%s", qname, diff)
 			}
 		}
-		cmpOpt := h.EquateInt64Approx(2) // allow for 2 seconds difference in Z.Score
 		for qname, want := range tc.wantArchived {
 			wantArchived := h.GetArchivedEntries(t, r, qname)
-			if diff := cmp.Diff(want, wantArchived, h.SortZSetEntryOpt, cmpOpt); diff != "" {
+			if diff := cmp.Diff(want, wantArchived, h.SortZSetEntryOpt); diff != "" {
 				t.Errorf("unexpected archived tasks in queue %q: (-want, +got)\n%s", qname, diff)
 			}
 		}
@@ -2779,8 +2848,9 @@ func TestInspectorArchiveTaskArchivesPendingTask(t *testing.T) {
 	m1 := h.NewTaskMessage("task1", nil)
 	m2 := h.NewTaskMessageWithQueue("task2", nil, "custom")
 	m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
-	inspector := NewInspector(getRedisConnOpt(t))
 	now := time.Now()
+	inspector := NewInspector(getRedisConnOpt(t))
+	inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
 
 	tests := []struct {
 		pending map[string][]*base.TaskMessage
@@ -2875,6 +2945,7 @@ func TestInspectorArchiveTaskArchivesScheduledTask(t *testing.T) {
 	z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()}
 
 	inspector := NewInspector(getRedisConnOpt(t))
+	inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
 
 	tests := []struct {
 		scheduled map[string][]base.Z
@@ -2951,6 +3022,7 @@ func TestInspectorArchiveTaskArchivesRetryTask(t *testing.T) {
 	z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()}
 
 	inspector := NewInspector(getRedisConnOpt(t))
+	inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
 
 	tests := []struct {
 		retry map[string][]base.Z
@@ -3025,6 +3097,7 @@ func TestInspectorArchiveTaskError(t *testing.T) {
 	z3 := base.Z{Message: m3, Score: now.Add(2 * time.Minute).Unix()}
 
 	inspector := NewInspector(getRedisConnOpt(t))
+	inspector.rdb.SetClock(timeutil.NewSimulatedClock(now))
 
 	tests := []struct {
 		retry map[string][]base.Z

View File

@@ -18,6 +18,7 @@ import (
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/google/uuid"
 	"github.com/hibiken/asynq/internal/base"
+	"github.com/hibiken/asynq/internal/timeutil"
 )

 // EquateInt64Approx returns a Comparer option that treats int64 values
@@ -114,6 +115,13 @@ func NewTaskMessageWithQueue(taskType string, payload []byte, qname string) *base.TaskMessage {
 	}
 }

+// NewLeaseWithClock returns a new lease with the given expiration time and clock.
+func NewLeaseWithClock(expirationTime time.Time, clock timeutil.Clock) *base.Lease {
+	l := base.NewLease(expirationTime)
+	l.Clock = clock
+	return l
+}
+
 // JSON serializes the given key-value pairs into stream of bytes in JSON.
 func JSON(kv map[string]interface{}) []byte {
 	b, err := json.Marshal(kv)
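A quick sketch of how this helper combines with the simulated clock in a test; the assertions here are illustrative only, while NewLeaseWithClock, SimulatedClock, and AdvanceTime are the APIs appearing in this diff:

	now := time.Now()
	clock := timeutil.NewSimulatedClock(now)
	l := NewLeaseWithClock(now.Add(30*time.Second), clock)
	if !l.IsValid() {
		t.Fatal("lease should be valid before its expiration time")
	}
	clock.AdvanceTime(time.Minute) // jump past the expiration without sleeping
	if l.IsValid() {
		t.Fatal("lease should be invalid after its expiration time")
	}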
@@ -223,11 +231,11 @@ func SeedArchivedQueue(tb testing.TB, r redis.UniversalClient, entries []base.Z,
 	seedRedisZSet(tb, r, base.ArchivedKey(qname), entries, base.TaskStateArchived)
 }

-// SeedDeadlines initializes the deadlines set with the given entries.
-func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
+// SeedLease initializes the lease set with the given entries.
+func SeedLease(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
 	tb.Helper()
 	r.SAdd(context.Background(), base.AllQueues, qname)
-	seedRedisZSet(tb, r, base.DeadlinesKey(qname), entries, base.TaskStateActive)
+	seedRedisZSet(tb, r, base.LeaseKey(qname), entries, base.TaskStateActive)
 }

 // SeedCompletedQueue initializes the completed set with the given entries.
@@ -279,11 +287,11 @@ func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[
 	}
 }

-// SeedAllDeadlines initializes all of the deadlines with the given entries.
-func SeedAllDeadlines(tb testing.TB, r redis.UniversalClient, deadlines map[string][]base.Z) {
+// SeedAllLease initializes all of the lease sets with the given entries.
+func SeedAllLease(tb testing.TB, r redis.UniversalClient, lease map[string][]base.Z) {
 	tb.Helper()
-	for q, entries := range deadlines {
-		SeedDeadlines(tb, r, entries, q)
+	for q, entries := range lease {
+		SeedLease(tb, r, entries, q)
 	}
 }
@@ -307,8 +315,6 @@ func seedRedisList(tb testing.TB, c redis.UniversalClient, key string,
 		data := map[string]interface{}{
 			"msg":        encoded,
 			"state":      state.String(),
-			"timeout":    msg.Timeout,
-			"deadline":   msg.Deadline,
 			"unique_key": msg.UniqueKey,
 		}
 		if err := c.HSet(context.Background(), key, data).Err(); err != nil {
@@ -337,8 +343,6 @@ func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string,
 		data := map[string]interface{}{
 			"msg":        encoded,
 			"state":      state.String(),
-			"timeout":    msg.Timeout,
-			"deadline":   msg.Deadline,
 			"unique_key": msg.UniqueKey,
 		}
 		if err := c.HSet(context.Background(), key, data).Err(); err != nil {
@@ -416,11 +420,11 @@ func GetArchivedEntries(tb testing.TB, r redis.UniversalClient, qname string) []
 	return getMessagesFromZSetWithScores(tb, r, qname, base.ArchivedKey, base.TaskStateArchived)
 }

-// GetDeadlinesEntries returns all task messages and their scores in the deadlines set for the given queue.
+// GetLeaseEntries returns all task IDs and their scores in the lease set for the given queue.
 // It also asserts the state field of the task.
-func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
+func GetLeaseEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
 	tb.Helper()
-	return getMessagesFromZSetWithScores(tb, r, qname, base.DeadlinesKey, base.TaskStateActive)
+	return getMessagesFromZSetWithScores(tb, r, qname, base.LeaseKey, base.TaskStateActive)
 }

 // GetCompletedEntries returns all completed messages and their scores in the given queue.

View File

@@ -18,11 +18,12 @@ import (
 	"github.com/golang/protobuf/ptypes"
 	"github.com/hibiken/asynq/internal/errors"
 	pb "github.com/hibiken/asynq/internal/proto"
+	"github.com/hibiken/asynq/internal/timeutil"
 	"google.golang.org/protobuf/proto"
 )

 // Version of asynq library and CLI.
-const Version = "0.19.0"
+const Version = "0.22.0"

 // DefaultQueueName is the queue name used if none are specified by user.
 const DefaultQueueName = "default"
@@ -136,9 +137,9 @@ func ArchivedKey(qname string) string {
 	return fmt.Sprintf("%sarchived", QueueKeyPrefix(qname))
 }

-// DeadlinesKey returns a redis key for the deadlines.
-func DeadlinesKey(qname string) string {
-	return fmt.Sprintf("%sdeadlines", QueueKeyPrefix(qname))
+// LeaseKey returns a redis key for the lease.
+func LeaseKey(qname string) string {
+	return fmt.Sprintf("%slease", QueueKeyPrefix(qname))
 }

 func CompletedKey(qname string) string {
@@ -150,6 +151,16 @@ func PausedKey(qname string) string {
 	return fmt.Sprintf("%spaused", QueueKeyPrefix(qname))
 }

+// ProcessedTotalKey returns a redis key for total processed count for the given queue.
+func ProcessedTotalKey(qname string) string {
+	return fmt.Sprintf("%sprocessed", QueueKeyPrefix(qname))
+}
+
+// FailedTotalKey returns a redis key for total failure count for the given queue.
+func FailedTotalKey(qname string) string {
+	return fmt.Sprintf("%sfailed", QueueKeyPrefix(qname))
+}
+
 // ProcessedKey returns a redis key for processed count for the given day for the queue.
 func ProcessedKey(qname string, t time.Time) string {
 	return fmt.Sprintf("%sprocessed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
@@ -308,68 +319,6 @@ type Z struct {
 	Score int64
 }

-// ServerState represents state of a server.
-// ServerState methods are concurrency safe.
-type ServerState struct {
-	mu  sync.Mutex
-	val ServerStateValue
-}
-
-// NewServerState returns a new state instance.
-// Initial state is set to StateNew.
-func NewServerState() *ServerState {
-	return &ServerState{val: StateNew}
-}
-
-type ServerStateValue int
-
-const (
-	// StateNew represents a new server. Server begins in
-	// this state and then transition to StatusActive when
-	// Start or Run is callled.
-	StateNew ServerStateValue = iota
-
-	// StateActive indicates the server is up and active.
-	StateActive
-
-	// StateStopped indicates the server is up but no longer processing new tasks.
-	StateStopped
-
-	// StateClosed indicates the server has been shutdown.
-	StateClosed
-)
-
-var serverStates = []string{
-	"new",
-	"active",
-	"stopped",
-	"closed",
-}
-
-func (s *ServerState) String() string {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if StateNew <= s.val && s.val <= StateClosed {
-		return serverStates[s.val]
-	}
-	return "unknown status"
-}
-
-// Get returns the status value.
-func (s *ServerState) Get() ServerStateValue {
-	s.mu.Lock()
-	v := s.val
-	s.mu.Unlock()
-	return v
-}
-
-// Set sets the status value.
-func (s *ServerState) Set(v ServerStateValue) {
-	s.mu.Lock()
-	s.val = v
-	s.mu.Unlock()
-}
-
 // ServerInfo holds information about a running server.
 type ServerInfo struct {
 	Host string
@@ -655,24 +604,92 @@ func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
 	return fn, ok
 }

+// Lease is a time-bound lease for a worker to process a task.
+// It provides a communication channel between lessor and lessee about lease expiration.
+type Lease struct {
+	once sync.Once
+	ch   chan struct{}
+
+	Clock timeutil.Clock
+
+	mu       sync.Mutex
+	expireAt time.Time // guarded by mu
+}
+
+func NewLease(expirationTime time.Time) *Lease {
+	return &Lease{
+		ch:       make(chan struct{}),
+		expireAt: expirationTime,
+		Clock:    timeutil.NewRealClock(),
+	}
+}
+
+// Reset changes the lease to expire at the given time.
+// It returns true if the lease is still valid and the reset operation was successful, false if the lease had already expired.
+func (l *Lease) Reset(expirationTime time.Time) bool {
+	if !l.IsValid() {
+		return false
+	}
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.expireAt = expirationTime
+	return true
+}
+
+// NotifyExpiration sends a notification to the lessee about the expired lease.
+// It returns true if the notification was sent, false if the lease is still valid and no notification was sent.
+func (l *Lease) NotifyExpiration() bool {
+	if l.IsValid() {
+		return false
+	}
+	l.once.Do(l.closeCh)
+	return true
+}
+
+func (l *Lease) closeCh() {
+	close(l.ch)
+}
+
+// Done returns a communication channel from which the lessee can read to get notified when the lessor notifies about lease expiration.
+func (l *Lease) Done() <-chan struct{} {
+	return l.ch
+}
+
+// Deadline returns the expiration time of the lease.
+func (l *Lease) Deadline() time.Time {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.expireAt
+}
+
+// IsValid returns true if the lease's expiration time is in the future or equal to the current time,
+// false otherwise.
+func (l *Lease) IsValid() bool {
+	now := l.Clock.Now()
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.expireAt.After(now) || l.expireAt.Equal(now)
+}
 // Broker is a message broker that supports operations to manage task queues.
 //
 // See rdb.RDB as a reference implementation.
 type Broker interface {
 	Ping() error
-	Enqueue(msg *TaskMessage) error
-	EnqueueUnique(msg *TaskMessage, ttl time.Duration) error
+	Enqueue(ctx context.Context, msg *TaskMessage) error
+	EnqueueUnique(ctx context.Context, msg *TaskMessage, ttl time.Duration) error
 	Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
-	Done(msg *TaskMessage) error
-	MarkAsComplete(msg *TaskMessage) error
-	Requeue(msg *TaskMessage) error
-	Schedule(msg *TaskMessage, processAt time.Time) error
-	ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
-	Retry(msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
-	Archive(msg *TaskMessage, errMsg string) error
+	Done(ctx context.Context, msg *TaskMessage) error
+	MarkAsComplete(ctx context.Context, msg *TaskMessage) error
+	Requeue(ctx context.Context, msg *TaskMessage) error
+	Schedule(ctx context.Context, msg *TaskMessage, processAt time.Time) error
+	ScheduleUnique(ctx context.Context, msg *TaskMessage, processAt time.Time, ttl time.Duration) error
+	Retry(ctx context.Context, msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
+	Archive(ctx context.Context, msg *TaskMessage, errMsg string) error
 	ForwardIfReady(qnames ...string) error
 	DeleteExpiredCompletedTasks(qname string) error
-	ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
+	ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*TaskMessage, error)
+	ExtendLease(qname string, ids ...string) (time.Time, error)
 	WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
 	ClearServerState(host string, pid int, serverID string) error
 	CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
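Taken together, the new Lease type and the context-threaded Broker sketch a simple lessor/lessee protocol: a heartbeat keeps extending a worker's lease, and the worker abandons the task once the lease can no longer be extended. A minimal illustration under those assumptions; this is not code from this diff, and taskFinished is a hypothetical channel closed when the handler returns:

	lease := NewLease(time.Now().Add(30 * time.Second))

	// Lessor side (heartbeat loop): extend while the task is running.
	if !lease.Reset(time.Now().Add(30 * time.Second)) {
		// The lease already expired and can no longer be extended.
		lease.NotifyExpiration() // closes the Done() channel exactly once
	}

	// Lessee side (worker): stop work if the lease expires first.
	select {
	case <-lease.Done():
		// lease expired; abandon the task so it can be reclaimed
	case <-taskFinished:
		// handler returned within the lease
	}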

View File

@@ -16,6 +16,7 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/uuid"
+	"github.com/hibiken/asynq/internal/timeutil"
 )

 func TestTaskKey(t *testing.T) {
@@ -71,19 +72,19 @@ func TestActiveKey(t *testing.T) {
 	}
 }

-func TestDeadlinesKey(t *testing.T) {
+func TestLeaseKey(t *testing.T) {
 	tests := []struct {
 		qname string
 		want  string
 	}{
-		{"default", "asynq:{default}:deadlines"},
-		{"custom", "asynq:{custom}:deadlines"},
+		{"default", "asynq:{default}:lease"},
+		{"custom", "asynq:{custom}:lease"},
 	}

 	for _, tc := range tests {
-		got := DeadlinesKey(tc.qname)
+		got := LeaseKey(tc.qname)
 		if got != tc.want {
-			t.Errorf("DeadlinesKey(%q) = %q, want %q", tc.qname, got, tc.want)
+			t.Errorf("LeaseKey(%q) = %q, want %q", tc.qname, got, tc.want)
 		}
 	}
 }
@@ -173,6 +174,40 @@ func TestPausedKey(t *testing.T) {
 	}
 }

+func TestProcessedTotalKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:processed"},
+		{"custom", "asynq:{custom}:processed"},
+	}
+
+	for _, tc := range tests {
+		got := ProcessedTotalKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("ProcessedTotalKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
+func TestFailedTotalKey(t *testing.T) {
+	tests := []struct {
+		qname string
+		want  string
+	}{
+		{"default", "asynq:{default}:failed"},
+		{"custom", "asynq:{custom}:failed"},
+	}
+
+	for _, tc := range tests {
+		got := FailedTotalKey(tc.qname)
+		if got != tc.want {
+			t.Errorf("FailedTotalKey(%q) = %q, want %q", tc.qname, got, tc.want)
+		}
+	}
+}
+
 func TestProcessedKey(t *testing.T) {
 	tests := []struct {
 		qname string
@@ -549,30 +584,6 @@ func TestSchedulerEnqueueEventEncoding(t *testing.T) {
 	}
 }

-// Test for status being accessed by multiple goroutines.
-// Run with -race flag to check for data race.
-func TestStatusConcurrentAccess(t *testing.T) {
-	status := NewServerState()
-
-	var wg sync.WaitGroup
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		status.Get()
-		_ = status.String()
-	}()
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		status.Set(StateClosed)
-		_ = status.String()
-	}()
-
-	wg.Wait()
-}
-
 // Test for cancelations being accessed by multiple goroutines.
 // Run with -race flag to check for data race.
 func TestCancelationsConcurrentAccess(t *testing.T) {
@@ -617,3 +628,75 @@ func TestCancelationsConcurrentAccess(t *testing.T) {
 		t.Errorf("(*Cancelations).Get(%q) = _, true, want <nil>, false", key2)
 	}
 }
+
+func TestLeaseReset(t *testing.T) {
+	now := time.Now()
+	clock := timeutil.NewSimulatedClock(now)
+	l := NewLease(now.Add(30 * time.Second))
+	l.Clock = clock
+
+	// Check initial state.
+	if !l.IsValid() {
+		t.Errorf("lease should be valid when expiration is set to a future time")
+	}
+	if want := now.Add(30 * time.Second); l.Deadline() != want {
+		t.Errorf("Lease.Deadline() = %v, want %v", l.Deadline(), want)
+	}
+
+	// Test Reset.
+	if !l.Reset(now.Add(45 * time.Second)) {
+		t.Fatalf("Lease.Reset returned false when extending")
+	}
+	if want := now.Add(45 * time.Second); l.Deadline() != want {
+		t.Errorf("After Reset: Lease.Deadline() = %v, want %v", l.Deadline(), want)
+	}
+
+	clock.AdvanceTime(1 * time.Minute) // simulate lease expiration
+
+	if l.IsValid() {
+		t.Errorf("lease should be invalid after expiration")
+	}
+
+	// Reset should return false if the lease is expired.
+	if l.Reset(time.Now().Add(20 * time.Second)) {
+		t.Errorf("Lease.Reset should return false after expiration")
+	}
+}
+
+func TestLeaseNotifyExpiration(t *testing.T) {
+	now := time.Now()
+	clock := timeutil.NewSimulatedClock(now)
+	l := NewLease(now.Add(30 * time.Second))
+	l.Clock = clock
+
+	select {
+	case <-l.Done():
+		t.Fatalf("Lease.Done() did not block")
+	default:
+	}
+
+	if l.NotifyExpiration() {
+		t.Fatalf("Lease.NotifyExpiration() should return false when lease is still valid")
+	}
+
+	clock.AdvanceTime(1 * time.Minute) // simulate lease expiration
+
+	if l.IsValid() {
+		t.Errorf("Lease should be invalid after expiration")
+	}
+	if !l.NotifyExpiration() {
+		t.Errorf("Lease.NotifyExpiration() should return true after expiration")
+	}
+	if !l.NotifyExpiration() {
+		t.Errorf("It should be legal to call Lease.NotifyExpiration multiple times")
+	}
+
+	select {
+	case <-l.Done():
+		// expected
+	default:
+		t.Errorf("Lease.Done() blocked after call to Lease.NotifyExpiration()")
+	}
+}

View File

@@ -28,14 +28,14 @@ type ctxKey int
 const metadataCtxKey ctxKey = 0

 // New returns a context and cancel function for a given task message.
-func New(msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
+func New(base context.Context, msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
 	metadata := taskMetadata{
 		id:         msg.ID,
 		maxRetry:   msg.Retry,
 		retryCount: msg.Retried,
 		qname:      msg.Queue,
 	}

-	ctx := context.WithValue(context.Background(), metadataCtxKey, metadata)
+	ctx := context.WithValue(base, metadataCtxKey, metadata)
 	return context.WithDeadline(ctx, deadline)
 }
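A Go scoping note on this signature: the parameter name base shadows the imported base package only inside the function body; parameter names are not in scope within the signature itself, so *base.TaskMessage still resolves to the package and this compiles. The practical effect is that values placed on a caller-supplied context now reach task handlers, as in this hedged sketch where traceKey and msg are illustrative:

	type traceKey struct{} // hypothetical context key

	parent := context.WithValue(context.Background(), traceKey{}, "trace-123")
	ctx, cancel := New(parent, msg, time.Now().Add(30*time.Minute))
	defer cancel()
	_ = ctx.Value(traceKey{}) // "trace-123", available alongside the task metadata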

View File

@@ -6,6 +6,7 @@ package context

 import (
 	"context"
+	"fmt"
 	"testing"
 	"time"

@@ -28,7 +29,7 @@ func TestCreateContextWithFutureDeadline(t *testing.T) {
 			Payload: nil,
 		}

-		ctx, cancel := New(msg, tc.deadline)
+		ctx, cancel := New(context.Background(), msg, tc.deadline)

 		select {
 		case x := <-ctx.Done():
 			t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
@@ -53,6 +54,53 @@ func TestCreateContextWithFutureDeadline(t *testing.T) {
 	}
 }

+func TestCreateContextWithBaseContext(t *testing.T) {
+	type ctxKey string
+	type ctxValue string
+	var key ctxKey = "key"
+	var value ctxValue = "value"
+
+	tests := []struct {
+		baseCtx  context.Context
+		validate func(ctx context.Context, t *testing.T) error
+	}{
+		{
+			baseCtx: context.WithValue(context.Background(), key, value),
+			validate: func(ctx context.Context, t *testing.T) error {
+				got, ok := ctx.Value(key).(ctxValue)
+				if !ok {
+					return fmt.Errorf("ctx.Value().(ctxValue) returned false, expected to be true")
+				}
+				if want := value; got != want {
+					return fmt.Errorf("ctx.Value().(ctxValue) returned unknown value (%v), expected to be %s", got, value)
+				}
+				return nil
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		msg := &base.TaskMessage{
+			Type:    "something",
+			ID:      uuid.NewString(),
+			Payload: nil,
+		}
+
+		ctx, cancel := New(tc.baseCtx, msg, time.Now().Add(30*time.Minute))
+		defer cancel()
+
+		select {
+		case x := <-ctx.Done():
+			t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x)
+		default:
+		}
+
+		if err := tc.validate(ctx, t); err != nil {
+			t.Errorf("%v", err)
+		}
+	}
+}
+
 func TestCreateContextWithPastDeadline(t *testing.T) {
 	tests := []struct {
 		deadline time.Time
@@ -67,7 +115,7 @@ func TestCreateContextWithPastDeadline(t *testing.T) {
 			Payload: nil,
 		}

-		ctx, cancel := New(msg, tc.deadline)
+		ctx, cancel := New(context.Background(), msg, tc.deadline)
 		defer cancel()

 		select {
@@ -97,7 +145,7 @@ func TestGetTaskMetadataFromContext(t *testing.T) {
 	}

 	for _, tc := range tests {
-		ctx, cancel := New(tc.msg, time.Now().Add(30*time.Minute))
+		ctx, cancel := New(context.Background(), tc.msg, time.Now().Add(30*time.Minute))
 		defer cancel()

 		id, ok := GetTaskID(ctx)

View File

@@ -5,6 +5,7 @@
 package rdb

 import (
+	"context"
 	"fmt"
 	"testing"
 	"time"
@@ -15,6 +16,7 @@ import (
 func BenchmarkEnqueue(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	msg := asynqtest.NewTaskMessage("task1", nil)
 	b.ResetTimer()
@@ -23,7 +25,7 @@ func BenchmarkEnqueue(b *testing.B) {
 		asynqtest.FlushDB(b, r.client)
 		b.StartTimer()

-		if err := r.Enqueue(msg); err != nil {
+		if err := r.Enqueue(ctx, msg); err != nil {
 			b.Fatalf("Enqueue failed: %v", err)
 		}
 	}
@@ -31,6 +33,7 @@ func BenchmarkEnqueue(b *testing.B) {
 func BenchmarkEnqueueUnique(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	msg := &base.TaskMessage{
 		Type:    "task1",
 		Payload: nil,
@@ -45,7 +48,7 @@ func BenchmarkEnqueueUnique(b *testing.B) {
 		asynqtest.FlushDB(b, r.client)
 		b.StartTimer()

-		if err := r.EnqueueUnique(msg, uniqueTTL); err != nil {
+		if err := r.EnqueueUnique(ctx, msg, uniqueTTL); err != nil {
 			b.Fatalf("EnqueueUnique failed: %v", err)
 		}
 	}
@@ -53,6 +56,7 @@ func BenchmarkEnqueueUnique(b *testing.B) {
 func BenchmarkSchedule(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	msg := asynqtest.NewTaskMessage("task1", nil)
 	processAt := time.Now().Add(3 * time.Minute)
 	b.ResetTimer()
@@ -62,7 +66,7 @@ func BenchmarkSchedule(b *testing.B) {
 		asynqtest.FlushDB(b, r.client)
 		b.StartTimer()

-		if err := r.Schedule(msg, processAt); err != nil {
+		if err := r.Schedule(ctx, msg, processAt); err != nil {
 			b.Fatalf("Schedule failed: %v", err)
 		}
 	}
@@ -70,6 +74,7 @@ func BenchmarkSchedule(b *testing.B) {
 func BenchmarkScheduleUnique(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	msg := &base.TaskMessage{
 		Type:    "task1",
 		Payload: nil,
@@ -85,7 +90,7 @@ func BenchmarkScheduleUnique(b *testing.B) {
 		asynqtest.FlushDB(b, r.client)
 		b.StartTimer()

-		if err := r.ScheduleUnique(msg, processAt, uniqueTTL); err != nil {
+		if err := r.ScheduleUnique(ctx, msg, processAt, uniqueTTL); err != nil {
 			b.Fatalf("EnqueueUnique failed: %v", err)
 		}
 	}
@@ -93,6 +98,7 @@ func BenchmarkScheduleUnique(b *testing.B) {
 func BenchmarkDequeueSingleQueue(b *testing.B) {
 	r := setup(b)
+	ctx := context.Background()
 	b.ResetTimer()

 	for i := 0; i < b.N; i++ {
@@ -101,7 +107,7 @@ func BenchmarkDequeueSingleQueue(b *testing.B) {
 		for i := 0; i < 10; i++ {
 			m := asynqtest.NewTaskMessageWithQueue(
 				fmt.Sprintf("task%d", i), nil, base.DefaultQueueName)
-			if err := r.Enqueue(m); err != nil {
+			if err := r.Enqueue(ctx, m); err != nil {
 				b.Fatalf("Enqueue failed: %v", err)
 			}
 		}
@@ -116,6 +122,7 @@ func BenchmarkDequeueSingleQueue(b *testing.B) {
 func BenchmarkDequeueMultipleQueues(b *testing.B) {
 	qnames := []string{"critical", "default", "low"}
 	r := setup(b)
+	ctx := context.Background()
 	b.ResetTimer()

 	for i := 0; i < b.N; i++ {
@@ -125,7 +132,7 @@ func BenchmarkDequeueMultipleQueues(b *testing.B) {
 		for _, qname := range qnames {
 			m := asynqtest.NewTaskMessageWithQueue(
 				fmt.Sprintf("%s_task%d", qname, i), nil, qname)
-			if err := r.Enqueue(m); err != nil {
+			if err := r.Enqueue(ctx, m); err != nil {
 				b.Fatalf("Enqueue failed: %v", err)
 			}
 		}
@@ -149,16 +156,17 @@ func BenchmarkDone(b *testing.B) {
 		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
 		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
 	}
+	ctx := context.Background()
 	b.ResetTimer()

 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 		asynqtest.FlushDB(b, r.client)
 		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
-		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()

-		if err := r.Done(msgs[0]); err != nil {
+		if err := r.Done(ctx, msgs[0]); err != nil {
 			b.Fatalf("Done failed: %v", err)
 		}
 	}
@@ -175,16 +183,17 @@ func BenchmarkRetry(b *testing.B) {
 		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
 		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
 	}
+	ctx := context.Background()
 	b.ResetTimer()

 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 		asynqtest.FlushDB(b, r.client)
 		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
-		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()

-		if err := r.Retry(msgs[0], time.Now().Add(1*time.Minute), "error", true /*isFailure*/); err != nil {
+		if err := r.Retry(ctx, msgs[0], time.Now().Add(1*time.Minute), "error", true /*isFailure*/); err != nil {
 			b.Fatalf("Retry failed: %v", err)
 		}
 	}
@@ -201,16 +210,17 @@ func BenchmarkArchive(b *testing.B) {
 		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
 		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
 	}
+	ctx := context.Background()
 	b.ResetTimer()

 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 		asynqtest.FlushDB(b, r.client)
 		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
-		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()

-		if err := r.Archive(msgs[0], "error"); err != nil {
+		if err := r.Archive(ctx, msgs[0], "error"); err != nil {
 			b.Fatalf("Archive failed: %v", err)
 		}
 	}
@@ -227,16 +237,17 @@ func BenchmarkRequeue(b *testing.B) {
 		{Message: m2, Score: time.Now().Add(20 * time.Second).Unix()},
 		{Message: m3, Score: time.Now().Add(30 * time.Second).Unix()},
 	}
+	ctx := context.Background()
 	b.ResetTimer()

 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 		asynqtest.FlushDB(b, r.client)
 		asynqtest.SeedActiveQueue(b, r.client, msgs, base.DefaultQueueName)
-		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
+		asynqtest.SeedLease(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()

-		if err := r.Requeue(msgs[0]); err != nil {
+		if err := r.Requeue(ctx, msgs[0]); err != nil {
 			b.Fatalf("Requeue failed: %v", err)
 		}
 	}

View File

@@ -41,11 +41,20 @@ type Stats struct {
 	Retry     int
 	Archived  int
 	Completed int
-	// Total number of tasks processed during the current date.
+	// Number of tasks processed within the current date.
 	// The number includes both succeeded and failed tasks.
 	Processed int
-	// Total number of tasks failed during the current date.
+	// Number of tasks failed within the current date.
 	Failed int
+	// Total number of tasks processed (both succeeded and failed) from this queue.
+	ProcessedTotal int
+	// Total number of tasks failed.
+	FailedTotal int
+	// Latency of the queue, measured by the oldest pending task in the queue.
+	Latency time.Duration
 	// Time this stats was taken.
 	Timestamp time.Time
 }
@@ -63,7 +72,7 @@ type DailyStats struct {
 	Time time.Time
 }

-// KEYS[1] -> asynq:<qname>
+// KEYS[1] -> asynq:<qname>:pending
 // KEYS[2] -> asynq:<qname>:active
 // KEYS[3] -> asynq:<qname>:scheduled
 // KEYS[4] -> asynq:<qname>:retry
@@ -71,11 +80,16 @@ type DailyStats struct {
 // KEYS[6] -> asynq:<qname>:completed
 // KEYS[7] -> asynq:<qname>:processed:<yyyy-mm-dd>
 // KEYS[8] -> asynq:<qname>:failed:<yyyy-mm-dd>
-// KEYS[9] -> asynq:<qname>:paused
+// KEYS[9] -> asynq:<qname>:processed
+// KEYS[10] -> asynq:<qname>:failed
+// KEYS[11] -> asynq:<qname>:paused
+//
+// ARGV[1] -> task key prefix
 var currentStatsCmd = redis.NewScript(`
 local res = {}
+local pendingTaskCount = redis.call("LLEN", KEYS[1])
 table.insert(res, KEYS[1])
-table.insert(res, redis.call("LLEN", KEYS[1]))
+table.insert(res, pendingTaskCount)
 table.insert(res, KEYS[2])
 table.insert(res, redis.call("LLEN", KEYS[2]))
 table.insert(res, KEYS[3])
@@ -86,22 +100,24 @@ table.insert(res, KEYS[5])
 table.insert(res, redis.call("ZCARD", KEYS[5]))
 table.insert(res, KEYS[6])
 table.insert(res, redis.call("ZCARD", KEYS[6]))
-local pcount = 0
-local p = redis.call("GET", KEYS[7])
-if p then
-	pcount = tonumber(p)
-end
-table.insert(res, KEYS[7])
-table.insert(res, pcount)
-local fcount = 0
-local f = redis.call("GET", KEYS[8])
-if f then
-	fcount = tonumber(f)
-end
-table.insert(res, KEYS[8])
-table.insert(res, fcount)
-table.insert(res, KEYS[9])
-table.insert(res, redis.call("EXISTS", KEYS[9]))
+for i=7,10 do
+	local count = 0
+	local n = redis.call("GET", KEYS[i])
+	if n then
+		count = tonumber(n)
+	end
+	table.insert(res, KEYS[i])
+	table.insert(res, count)
+end
+table.insert(res, KEYS[11])
+table.insert(res, redis.call("EXISTS", KEYS[11]))
+table.insert(res, "oldest_pending_since")
+if pendingTaskCount > 0 then
+	local id = redis.call("LRANGE", KEYS[1], -1, -1)[1]
+	table.insert(res, redis.call("HGET", ARGV[1] .. id, "pending_since"))
+else
+	table.insert(res, 0)
+end
 return res`)
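The rewritten script returns one flat array alternating keys and counts; the sentinel entry "oldest_pending_since" carries the enqueue time of the oldest pending task in Unix nanoseconds, or 0 when the queue has no pending tasks. A hedged sketch of the latency arithmetic the Go side performs with that value, where pendingSince is an illustrative variable:

	var latency time.Duration
	if pendingSince != 0 { // pendingSince: the raw UnixNano value from the script
		latency = now.Sub(time.Unix(0, pendingSince)) // now comes from r.clock.Now()
	}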
 // CurrentStats returns a current state of the queues.
@@ -114,7 +130,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
 	if !exists {
 		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
 	}
-	now := time.Now()
+	now := r.clock.Now()
 	res, err := currentStatsCmd.Run(context.Background(), r.client, []string{
 		base.PendingKey(qname),
 		base.ActiveKey(qname),
@@ -124,8 +140,10 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
 		base.CompletedKey(qname),
 		base.ProcessedKey(qname, now),
 		base.FailedKey(qname, now),
+		base.ProcessedTotalKey(qname),
+		base.FailedTotalKey(qname),
 		base.PausedKey(qname),
-	}).Result()
+	}, base.TaskKeyPrefix(qname)).Result()
 	if err != nil {
 		return nil, errors.E(op, errors.Unknown, err)
 	}
@@ -164,12 +182,22 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
 			stats.Processed = val
 		case base.FailedKey(qname, now):
 			stats.Failed = val
+		case base.ProcessedTotalKey(qname):
+			stats.ProcessedTotal = val
+		case base.FailedTotalKey(qname):
+			stats.FailedTotal = val
 		case base.PausedKey(qname):
 			if val == 0 {
 				stats.Paused = false
 			} else {
 				stats.Paused = true
 			}
+		case "oldest_pending_since":
+			if val == 0 {
+				stats.Latency = 0
+			} else {
+				stats.Latency = r.clock.Now().Sub(time.Unix(0, int64(val)))
+			}
 		}
 	}
 	stats.Size = size
@@ -288,7 +316,7 @@ func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
 		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
 	}
 	const day = 24 * time.Hour
-	now := time.Now().UTC()
+	now := r.clock.Now().UTC()
 	var days []time.Time
 	var keys []string
 	for i := 0; i < n; i++ {
@@ -405,7 +433,7 @@ func (r *RDB) GetTaskInfo(qname, id string) (*base.TaskInfo, error) {
 	keys := []string{base.TaskKey(qname, id)}
 	argv := []interface{}{
 		id,
-		time.Now().Unix(),
+		r.clock.Now().Unix(),
 		base.QueueKeyPrefix(qname),
 	}
 	res, err := getTaskInfoCmd.Run(context.Background(), r.client, keys, argv...).Result()
@@ -566,7 +594,7 @@ func (r *RDB) listMessages(qname string, state base.TaskState, pgn Pagination) (
 	}
 	var nextProcessAt time.Time
 	if state == base.TaskStatePending {
-		nextProcessAt = time.Now()
+		nextProcessAt = r.clock.Now()
 	}
 	infos = append(infos, &base.TaskInfo{
 		Message: m,
@@ -971,7 +999,7 @@ func (r *RDB) ArchiveAllPendingTasks(qname string) (int64, error) {
 		base.PendingKey(qname),
 		base.ArchivedKey(qname),
 	}
-	now := time.Now()
+	now := r.clock.Now()
 	argv := []interface{}{
 		now.Unix(),
 		now.AddDate(0, 0, -archivedExpirationInDays).Unix(),
@@ -1051,7 +1079,7 @@ func (r *RDB) ArchiveTask(qname, id string) error {
 		base.TaskKey(qname, id),
 		base.ArchivedKey(qname),
 	}
-	now := time.Now()
+	now := r.clock.Now()
 	argv := []interface{}{
 		id,
 		now.Unix(),
@@ -1116,7 +1144,7 @@ func (r *RDB) archiveAll(src, dst, qname string) (int64, error) {
 		src,
 		dst,
 	}
-	now := time.Now()
+	now := r.clock.Now()
 	argv := []interface{}{
 		now.Unix(),
 		now.AddDate(0, 0, -archivedExpirationInDays).Unix(),
@@ -1359,7 +1387,7 @@ func (r *RDB) DeleteAllPendingTasks(qname string) (int64, error) {
 // KEYS[3] -> asynq:{<qname>}:scheduled
 // KEYS[4] -> asynq:{<qname>}:retry
 // KEYS[5] -> asynq:{<qname>}:archived
-// KEYS[6] -> asynq:{<qname>}:deadlines
+// KEYS[6] -> asynq:{<qname>}:lease
 // --
 // ARGV[1] -> task key prefix
 //
@@ -1419,7 +1447,7 @@ return 1`)
 // KEYS[3] -> asynq:{<qname>}:scheduled
 // KEYS[4] -> asynq:{<qname>}:retry
 // KEYS[5] -> asynq:{<qname>}:archived
-// KEYS[6] -> asynq:{<qname>}:deadlines
+// KEYS[6] -> asynq:{<qname>}:lease
 // --
 // ARGV[1] -> task key prefix
 //
@@ -1488,7 +1516,7 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
 		base.ScheduledKey(qname),
 		base.RetryKey(qname),
 		base.ArchivedKey(qname),
-		base.DeadlinesKey(qname),
+		base.LeaseKey(qname),
 	}
 	res, err := script.Run(context.Background(), r.client, keys, base.TaskKeyPrefix(qname)).Result()
 	if err != nil {
@@ -1522,7 +1550,7 @@ return keys`)
 // ListServers returns the list of server info.
 func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
-	now := time.Now()
+	now := r.clock.Now()
 	res, err := listServerKeysCmd.Run(context.Background(), r.client, []string{base.AllServers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err
@@ -1556,7 +1584,7 @@ return keys`)
 // ListWorkers returns the list of worker stats.
 func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
 	var op errors.Op = "rdb.ListWorkers"
-	now := time.Now()
+	now := r.clock.Now()
 	res, err := listWorkersCmd.Run(context.Background(), r.client, []string{base.AllWorkers}, now.Unix()).Result()
 	if err != nil {
 		return nil, errors.E(op, errors.Unknown, err)
@@ -1591,7 +1619,7 @@ return keys`)
 // ListSchedulerEntries returns the list of scheduler entries.
 func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
-	now := time.Now()
+	now := r.clock.Now()
 	res, err := listSchedulerKeysCmd.Run(context.Background(), r.client, []string{base.AllSchedulers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err
@@ -1642,7 +1670,7 @@ func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*bas
 // Pause pauses processing of tasks from the given queue.
 func (r *RDB) Pause(qname string) error {
 	key := base.PausedKey(qname)
-	ok, err := r.client.SetNX(context.Background(), key, time.Now().Unix(), 0).Result()
+	ok, err := r.client.SetNX(context.Background(), key, r.clock.Now().Unix(), 0).Result()
 	if err != nil {
 		return err
 	}

View File

@@ -17,6 +17,7 @@ import (
 	h "github.com/hibiken/asynq/internal/asynqtest"
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/errors"
+	"github.com/hibiken/asynq/internal/timeutil"
 )

 func TestAllQueues(t *testing.T) {
@@ -60,17 +61,21 @@ func TestCurrentStats(t *testing.T) {
 	m5 := h.NewTaskMessageWithQueue("important_notification", nil, "critical")
 	m6 := h.NewTaskMessageWithQueue("minor_notification", nil, "low")
 	now := time.Now()
+	r.SetClock(timeutil.NewSimulatedClock(now))

 	tests := []struct {
 		pending    map[string][]*base.TaskMessage
-		inProgress map[string][]*base.TaskMessage
+		active     map[string][]*base.TaskMessage
 		scheduled  map[string][]base.Z
 		retry      map[string][]base.Z
 		archived   map[string][]base.Z
 		completed  map[string][]base.Z
 		processed  map[string]int
 		failed     map[string]int
+		processedTotal map[string]int
+		failedTotal    map[string]int
 		paused     []string
+		oldestPendingMessageEnqueueTime map[string]time.Time
 		qname      string
 		want       *Stats
 	}{
@@ -80,7 +85,7 @@ func TestCurrentStats(t *testing.T) {
 				"critical": {m5},
 				"low":      {m6},
 			},
-			inProgress: map[string][]*base.TaskMessage{
+			active: map[string][]*base.TaskMessage{
 				"default":  {m2},
 				"critical": {},
 				"low":      {},
@@ -118,6 +123,21 @@ func TestCurrentStats(t *testing.T) {
 				"critical": 0,
 				"low":      1,
 			},
+			processedTotal: map[string]int{
+				"default":  11111,
+				"critical": 22222,
+				"low":      33333,
+			},
+			failedTotal: map[string]int{
+				"default":  111,
+				"critical": 222,
+				"low":      333,
+			},
+			oldestPendingMessageEnqueueTime: map[string]time.Time{
+				"default":  now.Add(-15 * time.Second),
+				"critical": now.Add(-200 * time.Millisecond),
+				"low":      now.Add(-30 * time.Second),
+			},
 			paused: []string{},
 			qname:  "default",
 			want: &Stats{
@@ -132,16 +152,19 @@ func TestCurrentStats(t *testing.T) {
 				Completed: 0,
 				Processed: 120,
 				Failed:    2,
+				ProcessedTotal: 11111,
+				FailedTotal:    111,
+				Latency:        15 * time.Second,
 				Timestamp: now,
 			},
 		},
 		{
 			pending: map[string][]*base.TaskMessage{
 				"default":  {m1},
-				"critical": {m5},
+				"critical": {},
 				"low":      {m6},
 			},
-			inProgress: map[string][]*base.TaskMessage{
+			active: map[string][]*base.TaskMessage{
 				"default":  {m2},
 				"critical": {},
 				"low":      {},
@@ -179,13 +202,28 @@ func TestCurrentStats(t *testing.T) {
 				"critical": 0,
 				"low":      1,
 			},
+			processedTotal: map[string]int{
+				"default":  11111,
+				"critical": 22222,
+				"low":      33333,
+			},
+			failedTotal: map[string]int{
+				"default":  111,
+				"critical": 222,
+				"low":      333,
+			},
+			oldestPendingMessageEnqueueTime: map[string]time.Time{
+				"default":  now.Add(-15 * time.Second),
+				"critical": time.Time{}, // zero value since there's no pending task in this queue
+				"low":      now.Add(-30 * time.Second),
+			},
 			paused: []string{"critical", "low"},
 			qname:  "critical",
 			want: &Stats{
 				Queue:  "critical",
 				Paused: true,
-				Size:    1,
-				Pending: 1,
+				Size:    0,
+				Pending: 0,
 				Active:    0,
 				Scheduled: 0,
 				Retry:     0,
@@ -193,6 +231,9 @@ func TestCurrentStats(t *testing.T) {
 				Completed: 0,
 				Processed: 100,
 				Failed:    0,
+				ProcessedTotal: 22222,
+				FailedTotal:    222,
+				Latency:        0,
 				Timestamp: now,
 			},
 		},
@@ -206,18 +247,30 @@ func TestCurrentStats(t *testing.T) {
 			}
 		}
 		h.SeedAllPendingQueues(t, r.client, tc.pending)
-		h.SeedAllActiveQueues(t, r.client, tc.inProgress)
+		h.SeedAllActiveQueues(t, r.client, tc.active)
 		h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
 		h.SeedAllRetryQueues(t, r.client, tc.retry)
 		h.SeedAllArchivedQueues(t, r.client, tc.archived)
 		h.SeedAllCompletedQueues(t, r.client, tc.completed)
+		ctx := context.Background()
 		for qname, n := range tc.processed {
-			processedKey := base.ProcessedKey(qname, now)
-			r.client.Set(context.Background(), processedKey, n, 0)
+			r.client.Set(ctx, base.ProcessedKey(qname, now), n, 0)
 		}
 		for qname, n := range tc.failed {
-			failedKey := base.FailedKey(qname, now)
-			r.client.Set(context.Background(), failedKey, n, 0)
+			r.client.Set(ctx, base.FailedKey(qname, now), n, 0)
+		}
+		for qname, n := range tc.processedTotal {
+			r.client.Set(ctx, base.ProcessedTotalKey(qname), n, 0)
+		}
+		for qname, n := range tc.failedTotal {
+			r.client.Set(ctx, base.FailedTotalKey(qname), n, 0)
+		}
+		for qname, enqueueTime := range tc.oldestPendingMessageEnqueueTime {
+			if enqueueTime.IsZero() {
+				continue
+			}
+			oldestPendingMessageID := r.client.LRange(ctx, base.PendingKey(qname), -1, -1).Val()[0] // get the rightmost msg in the list
+			r.client.HSet(ctx, base.TaskKey(qname, oldestPendingMessageID), "pending_since", enqueueTime.UnixNano())
 		}

 		got, err := r.CurrentStats(tc.qname)
@@ -879,7 +932,7 @@ func TestListScheduledPagination(t *testing.T) {
 	// create 100 tasks with increasing wait times.
 	for i := 0; i < 100; i++ {
 		msg := h.NewTaskMessage(fmt.Sprintf("task %d", i), nil)
-		if err := r.Schedule(msg, time.Now().Add(time.Duration(i)*time.Second)); err != nil {
+		if err := r.Schedule(context.Background(), msg, time.Now().Add(time.Duration(i)*time.Second)); err != nil {
 			t.Fatal(err)
 		}
 	}
@@ -2612,8 +2665,11 @@ func TestArchiveAllPendingTasks(t *testing.T) {
 	m2 := h.NewTaskMessage("task2", nil)
 	m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
 	m4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
-	t1 := time.Now().Add(1 * time.Minute)
-	t2 := time.Now().Add(1 * time.Hour)
+	now := time.Now()
+	t1 := now.Add(1 * time.Minute)
+	t2 := now.Add(1 * time.Hour)
+	r.SetClock(timeutil.NewSimulatedClock(now))

 	tests := []struct {
 		pending map[string][]*base.TaskMessage
@@ -2637,8 +2693,8 @@ func TestArchiveAllPendingTasks(t *testing.T) {
 			},
 			wantArchived: map[string][]base.Z{
 				"default": {
-					{Message: m1, Score: time.Now().Unix()},
-					{Message: m2, Score: time.Now().Unix()},
+					{Message: m1, Score: now.Unix()},
+					{Message: m2, Score: now.Unix()},
 				},
 			},
 		},
@@ -2656,7 +2712,7 @@ func TestArchiveAllPendingTasks(t *testing.T) {
 			},
 			wantArchived: map[string][]base.Z{
 				"default": {
-					{Message: m1, Score: time.Now().Unix()},
+					{Message: m1, Score: now.Unix()},
 					{Message: m2, Score: t2.Unix()},
 				},
 			},
@@ -2701,8 +2757,8 @@ func TestArchiveAllPendingTasks(t *testing.T) {
 			wantArchived: map[string][]base.Z{
 				"default": {},
 				"custom": {
-					{Message: m3, Score: time.Now().Unix()},
-					{Message: m4, Score: time.Now().Unix()},
+					{Message: m3, Score: now.Unix()},
+					{Message: m4, Score: now.Unix()},
 				},
 			},
 		},
@@ -2730,7 +2786,7 @@ func TestArchiveAllPendingTasks(t *testing.T) {
 		for qname, want := range tc.wantArchived {
 			gotArchived := h.GetArchivedEntries(t, r.client, qname)
-			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt, zScoreCmpOpt); diff != "" {
+			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt); diff != "" {
 				t.Errorf("mismatch found in %q; (-want,+got)\n%s",
 					base.ArchivedKey(qname), diff)
 			}
@@ -2744,10 +2800,13 @@ func TestArchiveAllRetryTasks(t *testing.T) {
 	m2 := h.NewTaskMessage("task2", nil)
 	m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
 	m4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
-	t1 := time.Now().Add(1 * time.Minute)
-	t2 := time.Now().Add(1 * time.Hour)
-	t3 := time.Now().Add(2 * time.Hour)
-	t4 := time.Now().Add(3 * time.Hour)
+	now := time.Now()
+	t1 := now.Add(1 * time.Minute)
+	t2 := now.Add(1 * time.Hour)
+	t3 := now.Add(2 * time.Hour)
+	t4 := now.Add(3 * time.Hour)
+	r.SetClock(timeutil.NewSimulatedClock(now))

 	tests := []struct {
 		retry map[string][]base.Z
@@ -2774,8 +2833,8 @@ func TestArchiveAllRetryTasks(t *testing.T) {
 			},
 			wantArchived: map[string][]base.Z{
 				"default": {
-					{Message: m1, Score: time.Now().Unix()},
-					{Message: m2, Score: time.Now().Unix()},
+					{Message: m1, Score: now.Unix()},
+					{Message: m2, Score: now.Unix()},
 				},
 			},
 		},
@@ -2793,7 +2852,7 @@ func TestArchiveAllRetryTasks(t *testing.T) {
 			},
 			wantArchived: map[string][]base.Z{
 				"default": {
-					{Message: m1, Score: time.Now().Unix()},
+					{Message: m1, Score: now.Unix()},
 					{Message: m2, Score: t2.Unix()},
 				},
 			},
@@ -2847,8 +2906,8 @@ func TestArchiveAllRetryTasks(t *testing.T) {
 			wantArchived: map[string][]base.Z{
 				"default": {},
 				"custom": {
-					{Message: m3, Score: time.Now().Unix()},
-					{Message: m4, Score: time.Now().Unix()},
+					{Message: m3, Score: now.Unix()},
+					{Message: m4, Score: now.Unix()},
 				},
 			},
 		},
@@ -2868,7 +2927,7 @@ func TestArchiveAllRetryTasks(t *testing.T) {
 		for qname, want := range tc.wantRetry {
 			gotRetry := h.GetRetryEntries(t, r.client, qname)
-			if diff := cmp.Diff(want, gotRetry, h.SortZSetEntryOpt, zScoreCmpOpt); diff != "" {
+			if diff := cmp.Diff(want, gotRetry, h.SortZSetEntryOpt); diff != "" {
 				t.Errorf("mismatch found in %q; (-want,+got)\n%s",
 					base.RetryKey(qname), diff)
 			}
@@ -2876,7 +2935,7 @@ func TestArchiveAllRetryTasks(t *testing.T) {
 		for qname, want := range tc.wantArchived {
 			gotArchived := h.GetArchivedEntries(t, r.client, qname)
-			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt, zScoreCmpOpt); diff != "" {
+			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt); diff != "" {
 				t.Errorf("mismatch found in %q; (-want,+got)\n%s",
 					base.ArchivedKey(qname), diff)
 			}
@@ -2891,10 +2950,13 @@ func TestArchiveAllScheduledTasks(t *testing.T) {
 	m2 := h.NewTaskMessage("task2", nil)
 	m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
 	m4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
-	t1 := time.Now().Add(time.Minute)
-	t2 := time.Now().Add(time.Hour)
-	t3 := time.Now().Add(time.Hour)
-	t4 := time.Now().Add(time.Hour)
+	now := time.Now()
+	t1 := now.Add(time.Minute)
+	t2 := now.Add(time.Hour)
+	t3 := now.Add(time.Hour)
+	t4 := now.Add(time.Hour)
+	r.SetClock(timeutil.NewSimulatedClock(now))

 	tests := []struct {
 		scheduled map[string][]base.Z
@@ -2921,8 +2983,8 @@ func TestArchiveAllScheduledTasks(t *testing.T) {
 			},
 			wantArchived: map[string][]base.Z{
 				"default": {
-					{Message: m1, Score: time.Now().Unix()},
-					{Message: m2, Score: time.Now().Unix()},
+					{Message: m1, Score: now.Unix()},
+					{Message: m2, Score: now.Unix()},
 				},
 			},
 		},
@@ -2940,7 +3002,7 @@ func TestArchiveAllScheduledTasks(t *testing.T) {
 			},
 			wantArchived: map[string][]base.Z{
 				"default": {
-					{Message: m1, Score: time.Now().Unix()},
+					{Message: m1, Score: now.Unix()},
 					{Message: m2, Score: t2.Unix()},
 				},
 			},
@@ -2994,8 +3056,8 @@ func TestArchiveAllScheduledTasks(t *testing.T) {
 			wantArchived: map[string][]base.Z{
 				"default": {},
 				"custom": {
-					{Message: m3, Score: time.Now().Unix()},
-					{Message: m4, Score: time.Now().Unix()},
+					{Message: m3, Score: now.Unix()},
+					{Message: m4, Score: now.Unix()},
 				},
 			},
 		},
@@ -3015,7 +3077,7 @@ func TestArchiveAllScheduledTasks(t *testing.T) {
 		for qname, want := range tc.wantScheduled {
 			gotScheduled := h.GetScheduledEntries(t, r.client, qname)
-			if diff := cmp.Diff(want, gotScheduled, h.SortZSetEntryOpt, zScoreCmpOpt); diff != "" {
+			if diff := cmp.Diff(want, gotScheduled, h.SortZSetEntryOpt); diff != "" {
 				t.Errorf("mismatch found in %q; (-want,+got)\n%s",
 					base.ScheduledKey(qname), diff)
 			}
@@ -3023,7 +3085,7 @@ func TestArchiveAllScheduledTasks(t *testing.T) {
 		for qname, want := range tc.wantArchived {
 			gotArchived := h.GetArchivedEntries(t, r.client, qname)
-			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt, zScoreCmpOpt); diff != "" {
+			if diff := cmp.Diff(want, gotArchived, h.SortZSetEntryOpt); diff != "" {
 				t.Errorf("mismatch found in %q; (-want,+got)\n%s",
 					base.ArchivedKey(qname), diff)
 			}
@@ -3974,7 +4036,7 @@ func TestRemoveQueue(t *testing.T) {
 		keys := []string{
 			base.PendingKey(tc.qname),
 			base.ActiveKey(tc.qname),
-			base.DeadlinesKey(tc.qname),
+			base.LeaseKey(tc.qname),
 			base.ScheduledKey(tc.qname),
 			base.RetryKey(tc.qname),
 			base.ArchivedKey(tc.qname),

View File

@@ -8,24 +8,33 @@ package rdb
import ( import (
"context" "context"
"fmt" "fmt"
"math"
"time" "time"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors" "github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/timeutil"
"github.com/spf13/cast" "github.com/spf13/cast"
) )
const statsTTL = 90 * 24 * time.Hour // 90 days const statsTTL = 90 * 24 * time.Hour // 90 days
// LeaseDuration is the duration used to initially create a lease and to extend it thereafter.
const LeaseDuration = 30 * time.Second
// RDB is a client interface to query and mutate task queues. // RDB is a client interface to query and mutate task queues.
type RDB struct { type RDB struct {
client redis.UniversalClient client redis.UniversalClient
clock timeutil.Clock
} }
// NewRDB returns a new instance of RDB. // NewRDB returns a new instance of RDB.
func NewRDB(client redis.UniversalClient) *RDB { func NewRDB(client redis.UniversalClient) *RDB {
return &RDB{client} return &RDB{
client: client,
clock: timeutil.NewRealClock(),
}
} }
// Close closes the connection with redis server. // Close closes the connection with redis server.
@@ -38,21 +47,28 @@ func (r *RDB) Client() redis.UniversalClient {
return r.client return r.client
} }
// SetClock sets the clock used by RDB to the given clock.
//
// Use this function to set the clock to SimulatedClock in tests.
func (r *RDB) SetClock(c timeutil.Clock) {
r.clock = c
}
// Ping checks the connection with redis server. // Ping checks the connection with redis server.
func (r *RDB) Ping() error { func (r *RDB) Ping() error {
return r.client.Ping(context.Background()).Err() return r.client.Ping(context.Background()).Err()
} }
func (r *RDB) runScript(op errors.Op, script *redis.Script, keys []string, args ...interface{}) error { func (r *RDB) runScript(ctx context.Context, op errors.Op, script *redis.Script, keys []string, args ...interface{}) error {
if err := script.Run(context.Background(), r.client, keys, args...).Err(); err != nil { if err := script.Run(ctx, r.client, keys, args...).Err(); err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err)) return errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err))
} }
return nil return nil
} }
// Runs the given script with keys and args and retuns the script's return value as int64. // Runs the given script with keys and args and retuns the script's return value as int64.
func (r *RDB) runScriptWithErrorCode(op errors.Op, script *redis.Script, keys []string, args ...interface{}) (int64, error) { func (r *RDB) runScriptWithErrorCode(ctx context.Context, op errors.Op, script *redis.Script, keys []string, args ...interface{}) (int64, error) {
res, err := script.Run(context.Background(), r.client, keys, args...).Result() res, err := script.Run(ctx, r.client, keys, args...).Result()
if err != nil { if err != nil {
return 0, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err)) return 0, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
} }
@@ -71,8 +87,7 @@ func (r *RDB) runScriptWithErrorCode(op errors.Op, script *redis.Script, keys []
// -- // --
// ARGV[1] -> task message data // ARGV[1] -> task message data
// ARGV[2] -> task ID // ARGV[2] -> task ID
// ARGV[3] -> task timeout in seconds (0 if not timeout) // ARGV[3] -> current unix time in nsec
// ARGV[4] -> task deadline in unix time (0 if no deadline)
// //
// Output: // Output:
// Returns 1 if successfully enqueued // Returns 1 if successfully enqueued
@@ -84,20 +99,19 @@ end
redis.call("HSET", KEYS[1], redis.call("HSET", KEYS[1],
"msg", ARGV[1], "msg", ARGV[1],
"state", "pending", "state", "pending",
"timeout", ARGV[3], "pending_since", ARGV[3])
"deadline", ARGV[4])
redis.call("LPUSH", KEYS[2], ARGV[2]) redis.call("LPUSH", KEYS[2], ARGV[2])
return 1 return 1
`) `)
// Enqueue adds the given task to the pending list of the queue. // Enqueue adds the given task to the pending list of the queue.
func (r *RDB) Enqueue(msg *base.TaskMessage) error { func (r *RDB) Enqueue(ctx context.Context, msg *base.TaskMessage) error {
var op errors.Op = "rdb.Enqueue" var op errors.Op = "rdb.Enqueue"
encoded, err := base.EncodeMessage(msg) encoded, err := base.EncodeMessage(msg)
if err != nil { if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err)) return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
} }
if err := r.client.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil { if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
keys := []string{ keys := []string{
@@ -107,10 +121,9 @@ func (r *RDB) Enqueue(msg *base.TaskMessage) error {
argv := []interface{}{ argv := []interface{}{
encoded, encoded,
msg.ID, msg.ID,
msg.Timeout, r.clock.Now().UnixNano(),
msg.Deadline,
} }
n, err := r.runScriptWithErrorCode(op, enqueueCmd, keys, argv...) n, err := r.runScriptWithErrorCode(ctx, op, enqueueCmd, keys, argv...)
if err != nil { if err != nil {
return err return err
} }
@@ -129,8 +142,7 @@ func (r *RDB) Enqueue(msg *base.TaskMessage) error {
// ARGV[1] -> task ID // ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL // ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> task message data // ARGV[3] -> task message data
// ARGV[4] -> task timeout in seconds (0 if not timeout) // ARGV[4] -> current unix time in nsec
// ARGV[5] -> task deadline in unix time (0 if no deadline)
// //
// Output: // Output:
// Returns 1 if successfully enqueued // Returns 1 if successfully enqueued
@@ -147,8 +159,7 @@ end
redis.call("HSET", KEYS[2], redis.call("HSET", KEYS[2],
"msg", ARGV[3], "msg", ARGV[3],
"state", "pending", "state", "pending",
"timeout", ARGV[4], "pending_since", ARGV[4],
"deadline", ARGV[5],
"unique_key", KEYS[1]) "unique_key", KEYS[1])
redis.call("LPUSH", KEYS[3], ARGV[1]) redis.call("LPUSH", KEYS[3], ARGV[1])
return 1 return 1
@@ -156,13 +167,13 @@ return 1
// EnqueueUnique inserts the given task if the task's uniqueness lock can be acquired. // EnqueueUnique inserts the given task if the task's uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired. // It returns ErrDuplicateTask if the lock cannot be acquired.
func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error { func (r *RDB) EnqueueUnique(ctx context.Context, msg *base.TaskMessage, ttl time.Duration) error {
var op errors.Op = "rdb.EnqueueUnique" var op errors.Op = "rdb.EnqueueUnique"
encoded, err := base.EncodeMessage(msg) encoded, err := base.EncodeMessage(msg)
if err != nil { if err != nil {
return errors.E(op, errors.Internal, "cannot encode task message: %v", err) return errors.E(op, errors.Internal, "cannot encode task message: %v", err)
} }
if err := r.client.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil { if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
keys := []string{ keys := []string{
@@ -174,10 +185,9 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
msg.ID, msg.ID,
int(ttl.Seconds()), int(ttl.Seconds()),
encoded, encoded,
msg.Timeout, r.clock.Now().UnixNano(),
msg.Deadline,
} }
n, err := r.runScriptWithErrorCode(op, enqueueUniqueCmd, keys, argv...) n, err := r.runScriptWithErrorCode(ctx, op, enqueueUniqueCmd, keys, argv...)
if err != nil { if err != nil {
return err return err
} }
@@ -194,61 +204,46 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
// KEYS[1] -> asynq:{<qname>}:pending // KEYS[1] -> asynq:{<qname>}:pending
// KEYS[2] -> asynq:{<qname>}:paused // KEYS[2] -> asynq:{<qname>}:paused
// KEYS[3] -> asynq:{<qname>}:active // KEYS[3] -> asynq:{<qname>}:active
// KEYS[4] -> asynq:{<qname>}:deadlines // KEYS[4] -> asynq:{<qname>}:lease
// -- // --
// ARGV[1] -> current time in Unix time // ARGV[1] -> initial lease expiration Unix time
// ARGV[2] -> task key prefix // ARGV[2] -> task key prefix
// //
// Output: // Output:
// Returns nil if no processable task is found in the given queue. // Returns nil if no processable task is found in the given queue.
// Returns tuple {msg , deadline} if task is found, where `msg` is the encoded // Returns an encoded TaskMessage.
// TaskMessage, and `deadline` is Unix time in seconds.
// //
// Note: dequeueCmd checks whether a queue is paused first, before // Note: dequeueCmd checks whether a queue is paused first, before
// calling RPOPLPUSH to pop a task from the queue. // calling RPOPLPUSH to pop a task from the queue.
// It computes the task deadline by inspecting Timout and Deadline fields,
// and inserts the task to the deadlines zset with the computed deadline.
var dequeueCmd = redis.NewScript(` var dequeueCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[2]) == 0 then if redis.call("EXISTS", KEYS[2]) == 0 then
local id = redis.call("RPOPLPUSH", KEYS[1], KEYS[3]) local id = redis.call("RPOPLPUSH", KEYS[1], KEYS[3])
if id then if id then
local key = ARGV[2] .. id local key = ARGV[2] .. id
redis.call("HSET", key, "state", "active") redis.call("HSET", key, "state", "active")
local data = redis.call("HMGET", key, "msg", "timeout", "deadline") redis.call("HDEL", key, "pending_since")
local msg = data[1] redis.call("ZADD", KEYS[4], ARGV[1], id)
local timeout = tonumber(data[2]) return redis.call("HGET", key, "msg")
local deadline = tonumber(data[3])
local score
if timeout ~= 0 and deadline ~= 0 then
score = math.min(ARGV[1]+timeout, deadline)
elseif timeout ~= 0 then
score = ARGV[1] + timeout
elseif deadline ~= 0 then
score = deadline
else
return redis.error_reply("asynq internal error: both timeout and deadline are not set")
end
redis.call("ZADD", KEYS[4], score, id)
return {msg, score}
end end
end end
return nil`) return nil`)
// Dequeue queries given queues in order and pops a task message // Dequeue queries given queues in order and pops a task message
// off a queue if one exists and returns the message and deadline. // off a queue if one exists and returns the message and its lease expiration time.
// Dequeue skips a queue if the queue is paused. // Dequeue skips a queue if the queue is paused.
// If all queues are empty, ErrNoProcessableTask error is returned. // If all queues are empty, ErrNoProcessableTask error is returned.
func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Time, err error) { func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, leaseExpirationTime time.Time, err error) {
var op errors.Op = "rdb.Dequeue" var op errors.Op = "rdb.Dequeue"
for _, qname := range qnames { for _, qname := range qnames {
keys := []string{ keys := []string{
base.PendingKey(qname), base.PendingKey(qname),
base.PausedKey(qname), base.PausedKey(qname),
base.ActiveKey(qname), base.ActiveKey(qname),
base.DeadlinesKey(qname), base.LeaseKey(qname),
} }
leaseExpirationTime = r.clock.Now().Add(LeaseDuration)
argv := []interface{}{ argv := []interface{}{
time.Now().Unix(), leaseExpirationTime.Unix(),
base.TaskKeyPrefix(qname), base.TaskKeyPrefix(qname),
} }
res, err := dequeueCmd.Run(context.Background(), r.client, keys, argv...).Result() res, err := dequeueCmd.Run(context.Background(), r.client, keys, argv...).Result()
@@ -257,35 +252,27 @@ func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Ti
} else if err != nil { } else if err != nil {
return nil, time.Time{}, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err)) return nil, time.Time{}, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
} }
data, err := cast.ToSliceE(res) encoded, err := cast.ToStringE(res)
if err != nil {
return nil, time.Time{}, errors.E(op, errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
}
if len(data) != 2 {
return nil, time.Time{}, errors.E(op, errors.Internal, fmt.Sprintf("Lua script returned %d values; expected 2", len(data)))
}
encoded, err := cast.ToStringE(data[0])
if err != nil {
return nil, time.Time{}, errors.E(op, errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
}
d, err := cast.ToInt64E(data[1])
if err != nil { if err != nil {
return nil, time.Time{}, errors.E(op, errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res)) return nil, time.Time{}, errors.E(op, errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
} }
if msg, err = base.DecodeMessage([]byte(encoded)); err != nil { if msg, err = base.DecodeMessage([]byte(encoded)); err != nil {
return nil, time.Time{}, errors.E(op, errors.Internal, fmt.Sprintf("cannot decode message: %v", err)) return nil, time.Time{}, errors.E(op, errors.Internal, fmt.Sprintf("cannot decode message: %v", err))
} }
return msg, time.Unix(d, 0), nil return msg, leaseExpirationTime, nil
} }
return nil, time.Time{}, errors.E(op, errors.NotFound, errors.ErrNoProcessableTask) return nil, time.Time{}, errors.E(op, errors.NotFound, errors.ErrNoProcessableTask)
} }
// KEYS[1] -> asynq:{<qname>}:active // KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines // KEYS[2] -> asynq:{<qname>}:lease
// KEYS[3] -> asynq:{<qname>}:t:<task_id> // KEYS[3] -> asynq:{<qname>}:t:<task_id>
// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd> // KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[5] -> asynq:{<qname>}:processed
// -------
// ARGV[1] -> task ID // ARGV[1] -> task ID
// ARGV[2] -> stats expiration timestamp // ARGV[2] -> stats expiration timestamp
// ARGV[3] -> max int64 value
var doneCmd = redis.NewScript(` var doneCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND") return redis.error_reply("NOT FOUND")
@@ -300,16 +287,25 @@ local n = redis.call("INCR", KEYS[4])
if tonumber(n) == 1 then if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[4], ARGV[2]) redis.call("EXPIREAT", KEYS[4], ARGV[2])
end end
local total = redis.call("GET", KEYS[5])
if tonumber(total) == tonumber(ARGV[3]) then
redis.call("SET", KEYS[5], 1)
else
redis.call("INCR", KEYS[5])
end
return redis.status_reply("OK") return redis.status_reply("OK")
`) `)
// KEYS[1] -> asynq:{<qname>}:active // KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines // KEYS[2] -> asynq:{<qname>}:lease
// KEYS[3] -> asynq:{<qname>}:t:<task_id> // KEYS[3] -> asynq:{<qname>}:t:<task_id>
// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd> // KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[5] -> unique key // KEYS[5] -> asynq:{<qname>}:processed
// KEYS[6] -> unique key
// -------
// ARGV[1] -> task ID // ARGV[1] -> task ID
// ARGV[2] -> stats expiration timestamp // ARGV[2] -> stats expiration timestamp
// ARGV[3] -> max int64 value
var doneUniqueCmd = redis.NewScript(` var doneUniqueCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND") return redis.error_reply("NOT FOUND")
@@ -324,45 +320,56 @@ local n = redis.call("INCR", KEYS[4])
if tonumber(n) == 1 then if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[4], ARGV[2]) redis.call("EXPIREAT", KEYS[4], ARGV[2])
end end
if redis.call("GET", KEYS[5]) == ARGV[1] then local total = redis.call("GET", KEYS[5])
redis.call("DEL", KEYS[5]) if tonumber(total) == tonumber(ARGV[3]) then
redis.call("SET", KEYS[5], 1)
else
redis.call("INCR", KEYS[5])
end
if redis.call("GET", KEYS[6]) == ARGV[1] then
redis.call("DEL", KEYS[6])
end end
return redis.status_reply("OK") return redis.status_reply("OK")
`) `)
// Done removes the task from active queue and deletes the task. // Done removes the task from active queue and deletes the task.
// It removes a uniqueness lock acquired by the task, if any. // It removes a uniqueness lock acquired by the task, if any.
func (r *RDB) Done(msg *base.TaskMessage) error { func (r *RDB) Done(ctx context.Context, msg *base.TaskMessage) error {
var op errors.Op = "rdb.Done" var op errors.Op = "rdb.Done"
now := time.Now() now := r.clock.Now()
expireAt := now.Add(statsTTL) expireAt := now.Add(statsTTL)
keys := []string{ keys := []string{
base.ActiveKey(msg.Queue), base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue), base.LeaseKey(msg.Queue),
base.TaskKey(msg.Queue, msg.ID), base.TaskKey(msg.Queue, msg.ID),
base.ProcessedKey(msg.Queue, now), base.ProcessedKey(msg.Queue, now),
base.ProcessedTotalKey(msg.Queue),
} }
argv := []interface{}{ argv := []interface{}{
msg.ID, msg.ID,
expireAt.Unix(), expireAt.Unix(),
math.MaxInt64,
} }
// Note: We cannot pass empty unique key when running this script in redis-cluster. // Note: We cannot pass empty unique key when running this script in redis-cluster.
if len(msg.UniqueKey) > 0 { if len(msg.UniqueKey) > 0 {
keys = append(keys, msg.UniqueKey) keys = append(keys, msg.UniqueKey)
return r.runScript(op, doneUniqueCmd, keys, argv...) return r.runScript(ctx, op, doneUniqueCmd, keys, argv...)
} }
return r.runScript(op, doneCmd, keys, argv...) return r.runScript(ctx, op, doneCmd, keys, argv...)
} }
// KEYS[1] -> asynq:{<qname>}:active // KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines // KEYS[2] -> asynq:{<qname>}:lease
// KEYS[3] -> asynq:{<qname>}:completed // KEYS[3] -> asynq:{<qname>}:completed
// KEYS[4] -> asynq:{<qname>}:t:<task_id> // KEYS[4] -> asynq:{<qname>}:t:<task_id>
// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd> // KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[6] -> asynq:{<qname>}:processed
//
// ARGV[1] -> task ID // ARGV[1] -> task ID
// ARGV[2] -> stats expiration timestamp // ARGV[2] -> stats expiration timestamp
// ARGV[3] -> task exipration time in unix time // ARGV[3] -> task exipration time in unix time
// ARGV[4] -> task message data // ARGV[4] -> task message data
// ARGV[5] -> max int64 value
var markAsCompleteCmd = redis.NewScript(` var markAsCompleteCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND") return redis.error_reply("NOT FOUND")
@@ -378,19 +385,28 @@ local n = redis.call("INCR", KEYS[5])
if tonumber(n) == 1 then if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[5], ARGV[2]) redis.call("EXPIREAT", KEYS[5], ARGV[2])
end end
local total = redis.call("GET", KEYS[6])
if tonumber(total) == tonumber(ARGV[5]) then
redis.call("SET", KEYS[6], 1)
else
redis.call("INCR", KEYS[6])
end
return redis.status_reply("OK") return redis.status_reply("OK")
`) `)
// KEYS[1] -> asynq:{<qname>}:active // KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines // KEYS[2] -> asynq:{<qname>}:lease
// KEYS[3] -> asynq:{<qname>}:completed // KEYS[3] -> asynq:{<qname>}:completed
// KEYS[4] -> asynq:{<qname>}:t:<task_id> // KEYS[4] -> asynq:{<qname>}:t:<task_id>
// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd> // KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[6] -> asynq:{<qname>}:unique:{<checksum>} // KEYS[6] -> asynq:{<qname>}:processed
// KEYS[7] -> asynq:{<qname>}:unique:{<checksum>}
//
// ARGV[1] -> task ID // ARGV[1] -> task ID
// ARGV[2] -> stats expiration timestamp // ARGV[2] -> stats expiration timestamp
// ARGV[3] -> task exipration time in unix time // ARGV[3] -> task exipration time in unix time
// ARGV[4] -> task message data // ARGV[4] -> task message data
// ARGV[5] -> max int64 value
var markAsCompleteUniqueCmd = redis.NewScript(` var markAsCompleteUniqueCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND") return redis.error_reply("NOT FOUND")
@@ -406,17 +422,23 @@ local n = redis.call("INCR", KEYS[5])
if tonumber(n) == 1 then if tonumber(n) == 1 then
redis.call("EXPIREAT", KEYS[5], ARGV[2]) redis.call("EXPIREAT", KEYS[5], ARGV[2])
end end
if redis.call("GET", KEYS[6]) == ARGV[1] then local total = redis.call("GET", KEYS[6])
redis.call("DEL", KEYS[6]) if tonumber(total) == tonumber(ARGV[5]) then
redis.call("SET", KEYS[6], 1)
else
redis.call("INCR", KEYS[6])
end
if redis.call("GET", KEYS[7]) == ARGV[1] then
redis.call("DEL", KEYS[7])
end end
return redis.status_reply("OK") return redis.status_reply("OK")
`) `)
// MarkAsComplete removes the task from active queue to mark the task as completed. // MarkAsComplete removes the task from active queue to mark the task as completed.
// It removes a uniqueness lock acquired by the task, if any. // It removes a uniqueness lock acquired by the task, if any.
func (r *RDB) MarkAsComplete(msg *base.TaskMessage) error { func (r *RDB) MarkAsComplete(ctx context.Context, msg *base.TaskMessage) error {
var op errors.Op = "rdb.MarkAsComplete" var op errors.Op = "rdb.MarkAsComplete"
now := time.Now() now := r.clock.Now()
statsExpireAt := now.Add(statsTTL) statsExpireAt := now.Add(statsTTL)
msg.CompletedAt = now.Unix() msg.CompletedAt = now.Unix()
encoded, err := base.EncodeMessage(msg) encoded, err := base.EncodeMessage(msg)
@@ -425,27 +447,29 @@ func (r *RDB) MarkAsComplete(msg *base.TaskMessage) error {
} }
keys := []string{ keys := []string{
base.ActiveKey(msg.Queue), base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue), base.LeaseKey(msg.Queue),
base.CompletedKey(msg.Queue), base.CompletedKey(msg.Queue),
base.TaskKey(msg.Queue, msg.ID), base.TaskKey(msg.Queue, msg.ID),
base.ProcessedKey(msg.Queue, now), base.ProcessedKey(msg.Queue, now),
base.ProcessedTotalKey(msg.Queue),
} }
argv := []interface{}{ argv := []interface{}{
msg.ID, msg.ID,
statsExpireAt.Unix(), statsExpireAt.Unix(),
now.Unix() + msg.Retention, now.Unix() + msg.Retention,
encoded, encoded,
math.MaxInt64,
} }
// Note: We cannot pass empty unique key when running this script in redis-cluster. // Note: We cannot pass empty unique key when running this script in redis-cluster.
if len(msg.UniqueKey) > 0 { if len(msg.UniqueKey) > 0 {
keys = append(keys, msg.UniqueKey) keys = append(keys, msg.UniqueKey)
return r.runScript(op, markAsCompleteUniqueCmd, keys, argv...) return r.runScript(ctx, op, markAsCompleteUniqueCmd, keys, argv...)
} }
return r.runScript(op, markAsCompleteCmd, keys, argv...) return r.runScript(ctx, op, markAsCompleteCmd, keys, argv...)
} }
// KEYS[1] -> asynq:{<qname>}:active // KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines // KEYS[2] -> asynq:{<qname>}:lease
// KEYS[3] -> asynq:{<qname>}:pending // KEYS[3] -> asynq:{<qname>}:pending
// KEYS[4] -> asynq:{<qname>}:t:<task_id> // KEYS[4] -> asynq:{<qname>}:t:<task_id>
// ARGV[1] -> task ID // ARGV[1] -> task ID
@@ -462,24 +486,23 @@ redis.call("HSET", KEYS[4], "state", "pending")
return redis.status_reply("OK")`) return redis.status_reply("OK")`)
// Requeue moves the task from active queue to the specified queue. // Requeue moves the task from active queue to the specified queue.
func (r *RDB) Requeue(msg *base.TaskMessage) error { func (r *RDB) Requeue(ctx context.Context, msg *base.TaskMessage) error {
var op errors.Op = "rdb.Requeue" var op errors.Op = "rdb.Requeue"
keys := []string{ keys := []string{
base.ActiveKey(msg.Queue), base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue), base.LeaseKey(msg.Queue),
base.PendingKey(msg.Queue), base.PendingKey(msg.Queue),
base.TaskKey(msg.Queue, msg.ID), base.TaskKey(msg.Queue, msg.ID),
} }
return r.runScript(op, requeueCmd, keys, msg.ID) return r.runScript(ctx, op, requeueCmd, keys, msg.ID)
} }
// KEYS[1] -> asynq:{<qname>}:t:<task_id> // KEYS[1] -> asynq:{<qname>}:t:<task_id>
// KEYS[2] -> asynq:{<qname>}:scheduled // KEYS[2] -> asynq:{<qname>}:scheduled
// -------
// ARGV[1] -> task message data // ARGV[1] -> task message data
// ARGV[2] -> process_at time in Unix time // ARGV[2] -> process_at time in Unix time
// ARGV[3] -> task ID // ARGV[3] -> task ID
// ARGV[4] -> task timeout in seconds (0 if not timeout)
// ARGV[5] -> task deadline in unix time (0 if no deadline)
// //
// Output: // Output:
// Returns 1 if successfully enqueued // Returns 1 if successfully enqueued
@@ -490,21 +513,19 @@ if redis.call("EXISTS", KEYS[1]) == 1 then
end end
redis.call("HSET", KEYS[1], redis.call("HSET", KEYS[1],
"msg", ARGV[1], "msg", ARGV[1],
"state", "scheduled", "state", "scheduled")
"timeout", ARGV[4],
"deadline", ARGV[5])
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[3]) redis.call("ZADD", KEYS[2], ARGV[2], ARGV[3])
return 1 return 1
`) `)
// Schedule adds the task to the scheduled set to be processed in the future. // Schedule adds the task to the scheduled set to be processed in the future.
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error { func (r *RDB) Schedule(ctx context.Context, msg *base.TaskMessage, processAt time.Time) error {
var op errors.Op = "rdb.Schedule" var op errors.Op = "rdb.Schedule"
encoded, err := base.EncodeMessage(msg) encoded, err := base.EncodeMessage(msg)
if err != nil { if err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err)) return errors.E(op, errors.Unknown, fmt.Sprintf("cannot encode message: %v", err))
} }
if err := r.client.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil { if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
keys := []string{ keys := []string{
@@ -515,10 +536,8 @@ func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
encoded, encoded,
processAt.Unix(), processAt.Unix(),
msg.ID, msg.ID,
msg.Timeout,
msg.Deadline,
} }
n, err := r.runScriptWithErrorCode(op, scheduleCmd, keys, argv...) n, err := r.runScriptWithErrorCode(ctx, op, scheduleCmd, keys, argv...)
if err != nil { if err != nil {
return err return err
} }
@@ -531,12 +550,11 @@ func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
// KEYS[1] -> unique key // KEYS[1] -> unique key
// KEYS[2] -> asynq:{<qname>}:t:<task_id> // KEYS[2] -> asynq:{<qname>}:t:<task_id>
// KEYS[3] -> asynq:{<qname>}:scheduled // KEYS[3] -> asynq:{<qname>}:scheduled
// -------
// ARGV[1] -> task ID // ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL // ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> score (process_at timestamp) // ARGV[3] -> score (process_at timestamp)
// ARGV[4] -> task message // ARGV[4] -> task message
// ARGV[5] -> task timeout in seconds (0 if not timeout)
// ARGV[6] -> task deadline in unix time (0 if no deadline)
// //
// Output: // Output:
// Returns 1 if successfully scheduled // Returns 1 if successfully scheduled
@@ -553,8 +571,6 @@ end
redis.call("HSET", KEYS[2], redis.call("HSET", KEYS[2],
"msg", ARGV[4], "msg", ARGV[4],
"state", "scheduled", "state", "scheduled",
"timeout", ARGV[5],
"deadline", ARGV[6],
"unique_key", KEYS[1]) "unique_key", KEYS[1])
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1]) redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1])
return 1 return 1
@@ -562,13 +578,13 @@ return 1
// ScheduleUnique adds the task to the backlog queue to be processed in the future if the uniqueness lock can be acquired. // ScheduleUnique adds the task to the backlog queue to be processed in the future if the uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired. // It returns ErrDuplicateTask if the lock cannot be acquired.
func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error { func (r *RDB) ScheduleUnique(ctx context.Context, msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
var op errors.Op = "rdb.ScheduleUnique" var op errors.Op = "rdb.ScheduleUnique"
encoded, err := base.EncodeMessage(msg) encoded, err := base.EncodeMessage(msg)
if err != nil { if err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode task message: %v", err)) return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode task message: %v", err))
} }
if err := r.client.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil { if err := r.client.SAdd(ctx, base.AllQueues, msg.Queue).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
keys := []string{ keys := []string{
@@ -581,10 +597,8 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
int(ttl.Seconds()), int(ttl.Seconds()),
processAt.Unix(), processAt.Unix(),
encoded, encoded,
msg.Timeout,
msg.Deadline,
} }
n, err := r.runScriptWithErrorCode(op, scheduleUniqueCmd, keys, argv...) n, err := r.runScriptWithErrorCode(ctx, op, scheduleUniqueCmd, keys, argv...)
if err != nil { if err != nil {
return err return err
} }
@@ -599,15 +613,19 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
// KEYS[1] -> asynq:{<qname>}:t:<task_id> // KEYS[1] -> asynq:{<qname>}:t:<task_id>
// KEYS[2] -> asynq:{<qname>}:active // KEYS[2] -> asynq:{<qname>}:active
// KEYS[3] -> asynq:{<qname>}:deadlines // KEYS[3] -> asynq:{<qname>}:lease
// KEYS[4] -> asynq:{<qname>}:retry // KEYS[4] -> asynq:{<qname>}:retry
// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd> // KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[6] -> asynq:{<qname>}:failed:<yyyy-mm-dd> // KEYS[6] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
// KEYS[7] -> asynq:{<qname>}:processed
// KEYS[8] -> asynq:{<qname>}:failed
// -------
// ARGV[1] -> task ID // ARGV[1] -> task ID
// ARGV[2] -> updated base.TaskMessage value // ARGV[2] -> updated base.TaskMessage value
// ARGV[3] -> retry_at UNIX timestamp // ARGV[3] -> retry_at UNIX timestamp
// ARGV[4] -> stats expiration timestamp // ARGV[4] -> stats expiration timestamp
// ARGV[5] -> is_failure (bool) // ARGV[5] -> is_failure (bool)
// ARGV[6] -> max int64 value
var retryCmd = redis.NewScript(` var retryCmd = redis.NewScript(`
if redis.call("LREM", KEYS[2], 0, ARGV[1]) == 0 then if redis.call("LREM", KEYS[2], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND") return redis.error_reply("NOT FOUND")
@@ -626,15 +644,23 @@ if tonumber(ARGV[5]) == 1 then
if tonumber(m) == 1 then if tonumber(m) == 1 then
redis.call("EXPIREAT", KEYS[6], ARGV[4]) redis.call("EXPIREAT", KEYS[6], ARGV[4])
end end
local total = redis.call("GET", KEYS[7])
if tonumber(total) == tonumber(ARGV[6]) then
redis.call("SET", KEYS[7], 1)
redis.call("SET", KEYS[8], 1)
else
redis.call("INCR", KEYS[7])
redis.call("INCR", KEYS[8])
end
end end
return redis.status_reply("OK")`) return redis.status_reply("OK")`)
// Retry moves the task from active to retry queue. // Retry moves the task from active to retry queue.
// It also annotates the message with the given error message and // It also annotates the message with the given error message and
// if isFailure is true increments the retried counter. // if isFailure is true increments the retried counter.
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error { func (r *RDB) Retry(ctx context.Context, msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
var op errors.Op = "rdb.Retry" var op errors.Op = "rdb.Retry"
now := time.Now() now := r.clock.Now()
modified := *msg modified := *msg
if isFailure { if isFailure {
modified.Retried++ modified.Retried++
@@ -649,10 +675,12 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string, i
keys := []string{ keys := []string{
base.TaskKey(msg.Queue, msg.ID), base.TaskKey(msg.Queue, msg.ID),
base.ActiveKey(msg.Queue), base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue), base.LeaseKey(msg.Queue),
base.RetryKey(msg.Queue), base.RetryKey(msg.Queue),
base.ProcessedKey(msg.Queue, now), base.ProcessedKey(msg.Queue, now),
base.FailedKey(msg.Queue, now), base.FailedKey(msg.Queue, now),
base.ProcessedTotalKey(msg.Queue),
base.FailedTotalKey(msg.Queue),
} }
argv := []interface{}{ argv := []interface{}{
msg.ID, msg.ID,
@@ -660,8 +688,9 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string, i
processAt.Unix(), processAt.Unix(),
expireAt.Unix(), expireAt.Unix(),
isFailure, isFailure,
math.MaxInt64,
} }
return r.runScript(op, retryCmd, keys, argv...) return r.runScript(ctx, op, retryCmd, keys, argv...)
} }
const ( const (
@@ -671,16 +700,20 @@ const (
// KEYS[1] -> asynq:{<qname>}:t:<task_id> // KEYS[1] -> asynq:{<qname>}:t:<task_id>
// KEYS[2] -> asynq:{<qname>}:active // KEYS[2] -> asynq:{<qname>}:active
// KEYS[3] -> asynq:{<qname>}:deadlines // KEYS[3] -> asynq:{<qname>}:lease
// KEYS[4] -> asynq:{<qname>}:archived // KEYS[4] -> asynq:{<qname>}:archived
// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd> // KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[6] -> asynq:{<qname>}:failed:<yyyy-mm-dd> // KEYS[6] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
// KEYS[7] -> asynq:{<qname>}:processed
// KEYS[8] -> asynq:{<qname>}:failed
// -------
// ARGV[1] -> task ID // ARGV[1] -> task ID
// ARGV[2] -> updated base.TaskMessage value // ARGV[2] -> updated base.TaskMessage value
// ARGV[3] -> died_at UNIX timestamp // ARGV[3] -> died_at UNIX timestamp
// ARGV[4] -> cutoff timestamp (e.g., 90 days ago) // ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
// ARGV[5] -> max number of tasks in archive (e.g., 100) // ARGV[5] -> max number of tasks in archive (e.g., 100)
// ARGV[6] -> stats expiration timestamp // ARGV[6] -> stats expiration timestamp
// ARGV[7] -> max int64 value
var archiveCmd = redis.NewScript(` var archiveCmd = redis.NewScript(`
if redis.call("LREM", KEYS[2], 0, ARGV[1]) == 0 then if redis.call("LREM", KEYS[2], 0, ARGV[1]) == 0 then
return redis.error_reply("NOT FOUND") return redis.error_reply("NOT FOUND")
@@ -700,13 +733,21 @@ local m = redis.call("INCR", KEYS[6])
if tonumber(m) == 1 then if tonumber(m) == 1 then
redis.call("EXPIREAT", KEYS[6], ARGV[6]) redis.call("EXPIREAT", KEYS[6], ARGV[6])
end end
local total = redis.call("GET", KEYS[7])
if tonumber(total) == tonumber(ARGV[7]) then
redis.call("SET", KEYS[7], 1)
redis.call("SET", KEYS[8], 1)
else
redis.call("INCR", KEYS[7])
redis.call("INCR", KEYS[8])
end
return redis.status_reply("OK")`) return redis.status_reply("OK")`)
// Archive sends the given task to archive, attaching the error message to the task. // Archive sends the given task to archive, attaching the error message to the task.
// It also trims the archive by timestamp and set size. // It also trims the archive by timestamp and set size.
func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error { func (r *RDB) Archive(ctx context.Context, msg *base.TaskMessage, errMsg string) error {
var op errors.Op = "rdb.Archive" var op errors.Op = "rdb.Archive"
now := time.Now() now := r.clock.Now()
modified := *msg modified := *msg
modified.ErrorMsg = errMsg modified.ErrorMsg = errMsg
modified.LastFailedAt = now.Unix() modified.LastFailedAt = now.Unix()
@@ -719,10 +760,12 @@ func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
keys := []string{ keys := []string{
base.TaskKey(msg.Queue, msg.ID), base.TaskKey(msg.Queue, msg.ID),
base.ActiveKey(msg.Queue), base.ActiveKey(msg.Queue),
base.DeadlinesKey(msg.Queue), base.LeaseKey(msg.Queue),
base.ArchivedKey(msg.Queue), base.ArchivedKey(msg.Queue),
base.ProcessedKey(msg.Queue, now), base.ProcessedKey(msg.Queue, now),
base.FailedKey(msg.Queue, now), base.FailedKey(msg.Queue, now),
base.ProcessedTotalKey(msg.Queue),
base.FailedTotalKey(msg.Queue),
} }
argv := []interface{}{ argv := []interface{}{
msg.ID, msg.ID,
@@ -731,8 +774,9 @@ func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
cutoff.Unix(), cutoff.Unix(),
maxArchiveSize, maxArchiveSize,
expireAt.Unix(), expireAt.Unix(),
math.MaxInt64,
} }
return r.runScript(op, archiveCmd, keys, argv...) return r.runScript(ctx, op, archiveCmd, keys, argv...)
} }
// ForwardIfReady checks scheduled and retry sets of the given queues // ForwardIfReady checks scheduled and retry sets of the given queues
@@ -749,23 +793,27 @@ func (r *RDB) ForwardIfReady(qnames ...string) error {
// KEYS[1] -> source queue (e.g. asynq:{<qname>:scheduled or asynq:{<qname>}:retry}) // KEYS[1] -> source queue (e.g. asynq:{<qname>:scheduled or asynq:{<qname>}:retry})
// KEYS[2] -> asynq:{<qname>}:pending // KEYS[2] -> asynq:{<qname>}:pending
// ARGV[1] -> current unix time // ARGV[1] -> current unix time in seconds
// ARGV[2] -> task key prefix // ARGV[2] -> task key prefix
// ARGV[3] -> current unix time in nsec
// Note: Script moves tasks up to 100 at a time to keep the runtime of script short. // Note: Script moves tasks up to 100 at a time to keep the runtime of script short.
var forwardCmd = redis.NewScript(` var forwardCmd = redis.NewScript(`
local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, 100) local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, 100)
for _, id in ipairs(ids) do for _, id in ipairs(ids) do
redis.call("LPUSH", KEYS[2], id) redis.call("LPUSH", KEYS[2], id)
redis.call("ZREM", KEYS[1], id) redis.call("ZREM", KEYS[1], id)
redis.call("HSET", ARGV[2] .. id, "state", "pending") redis.call("HSET", ARGV[2] .. id,
"state", "pending",
"pending_since", ARGV[3])
end end
return table.getn(ids)`) return table.getn(ids)`)
// forward moves tasks with a score less than the current unix time // forward moves tasks with a score less than the current unix time
// from the src zset to the dst list. It returns the number of tasks moved. // from the src zset to the dst list. It returns the number of tasks moved.
func (r *RDB) forward(src, dst, taskKeyPrefix string) (int, error) { func (r *RDB) forward(src, dst, taskKeyPrefix string) (int, error) {
now := float64(time.Now().Unix()) now := r.clock.Now()
res, err := forwardCmd.Run(context.Background(), r.client, []string{src, dst}, now, taskKeyPrefix).Result() res, err := forwardCmd.Run(context.Background(), r.client,
[]string{src, dst}, now.Unix(), taskKeyPrefix, now.UnixNano()).Result()
if err != nil { if err != nil {
return 0, errors.E(errors.Internal, fmt.Sprintf("redis eval error: %v", err)) return 0, errors.E(errors.Internal, fmt.Sprintf("redis eval error: %v", err))
} }
@@ -830,7 +878,7 @@ func (r *RDB) deleteExpiredCompletedTasks(qname string, batchSize int) (int64, e
var op errors.Op = "rdb.DeleteExpiredCompletedTasks" var op errors.Op = "rdb.DeleteExpiredCompletedTasks"
keys := []string{base.CompletedKey(qname)} keys := []string{base.CompletedKey(qname)}
argv := []interface{}{ argv := []interface{}{
time.Now().Unix(), r.clock.Now().Unix(),
base.TaskKeyPrefix(qname), base.TaskKeyPrefix(qname),
batchSize, batchSize,
} }
@@ -845,10 +893,10 @@ func (r *RDB) deleteExpiredCompletedTasks(qname string, batchSize int) (int64, e
return n, nil return n, nil
} }
// KEYS[1] -> asynq:{<qname>}:deadlines // KEYS[1] -> asynq:{<qname>}:lease
// ARGV[1] -> deadline in unix time // ARGV[1] -> cutoff in unix time
// ARGV[2] -> task key prefix // ARGV[2] -> task key prefix
var listDeadlineExceededCmd = redis.NewScript(` var listLeaseExpiredCmd = redis.NewScript(`
local res = {} local res = {}
local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1]) local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1])
for _, id in ipairs(ids) do for _, id in ipairs(ids) do
@@ -858,14 +906,14 @@ end
return res return res
`) `)
// ListDeadlineExceeded returns a list of task messages that have exceeded the deadline from the given queues. // ListLeaseExpired returns a list of task messages with an expired lease from the given queues.
func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) { func (r *RDB) ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*base.TaskMessage, error) {
var op errors.Op = "rdb.ListDeadlineExceeded" var op errors.Op = "rdb.ListLeaseExpired"
var msgs []*base.TaskMessage var msgs []*base.TaskMessage
for _, qname := range qnames { for _, qname := range qnames {
res, err := listDeadlineExceededCmd.Run(context.Background(), r.client, res, err := listLeaseExpiredCmd.Run(context.Background(), r.client,
[]string{base.DeadlinesKey(qname)}, []string{base.LeaseKey(qname)},
deadline.Unix(), base.TaskKeyPrefix(qname)).Result() cutoff.Unix(), base.TaskKeyPrefix(qname)).Result()
if err != nil { if err != nil {
return nil, errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err)) return nil, errors.E(op, errors.Internal, fmt.Sprintf("redis eval error: %v", err))
} }
@@ -884,6 +932,22 @@ func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*bas
return msgs, nil return msgs, nil
} }
// ExtendLease extends the lease for the given tasks by LeaseDuration (30s).
// It returns a new expiration time if the operation was successful.
func (r *RDB) ExtendLease(qname string, ids ...string) (expirationTime time.Time, err error) {
expireAt := r.clock.Now().Add(LeaseDuration)
var zs []redis.Z
for _, id := range ids {
zs = append(zs, redis.Z{Member: id, Score: float64(expireAt.Unix())})
}
// Use XX option to only update elements that already exist; Don't add new elements
err = r.client.ZAddArgs(context.Background(), base.LeaseKey(qname), redis.ZAddArgs{XX: true, GT: true, Members: zs}).Err()
if err != nil {
return time.Time{}, err
}
return expireAt, nil
}
// KEYS[1] -> asynq:servers:{<host:pid:sid>} // KEYS[1] -> asynq:servers:{<host:pid:sid>}
// KEYS[2] -> asynq:workers:{<host:pid:sid>} // KEYS[2] -> asynq:workers:{<host:pid:sid>}
// ARGV[1] -> TTL in seconds // ARGV[1] -> TTL in seconds
@@ -903,11 +967,12 @@ return redis.status_reply("OK")`)
// WriteServerState writes server state data to redis with expiration set to the value ttl. // WriteServerState writes server state data to redis with expiration set to the value ttl.
func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error { func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
var op errors.Op = "rdb.WriteServerState" var op errors.Op = "rdb.WriteServerState"
ctx := context.Background()
bytes, err := base.EncodeServerInfo(info) bytes, err := base.EncodeServerInfo(info)
if err != nil { if err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode server info: %v", err)) return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode server info: %v", err))
} }
exp := time.Now().Add(ttl).UTC() exp := r.clock.Now().Add(ttl).UTC()
args := []interface{}{ttl.Seconds(), bytes} // args to the lua script args := []interface{}{ttl.Seconds(), bytes} // args to the lua script
for _, w := range workers { for _, w := range workers {
bytes, err := base.EncodeWorkerInfo(w) bytes, err := base.EncodeWorkerInfo(w)
@@ -918,13 +983,13 @@ func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo
} }
skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID) skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
wkey := base.WorkersKey(info.Host, info.PID, info.ServerID) wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
if err := r.client.ZAdd(context.Background(), base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil { if err := r.client.ZAdd(ctx, base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sadd", Err: err})
} }
if err := r.client.ZAdd(context.Background(), base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil { if err := r.client.ZAdd(ctx, base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err})
} }
return r.runScript(op, writeServerStateCmd, []string{skey, wkey}, args...) return r.runScript(ctx, op, writeServerStateCmd, []string{skey, wkey}, args...)
} }
// KEYS[1] -> asynq:servers:{<host:pid:sid>} // KEYS[1] -> asynq:servers:{<host:pid:sid>}
@@ -937,15 +1002,16 @@ return redis.status_reply("OK")`)
// ClearServerState deletes server state data from redis. // ClearServerState deletes server state data from redis.
func (r *RDB) ClearServerState(host string, pid int, serverID string) error { func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
var op errors.Op = "rdb.ClearServerState" var op errors.Op = "rdb.ClearServerState"
ctx := context.Background()
skey := base.ServerInfoKey(host, pid, serverID) skey := base.ServerInfoKey(host, pid, serverID)
wkey := base.WorkersKey(host, pid, serverID) wkey := base.WorkersKey(host, pid, serverID)
if err := r.client.ZRem(context.Background(), base.AllServers, skey).Err(); err != nil { if err := r.client.ZRem(ctx, base.AllServers, skey).Err(); err != nil {
return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err}) return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err})
} }
if err := r.client.ZRem(context.Background(), base.AllWorkers, wkey).Err(); err != nil { if err := r.client.ZRem(ctx, base.AllWorkers, wkey).Err(); err != nil {
return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err}) return errors.E(op, errors.Internal, &errors.RedisCommandError{Command: "zrem", Err: err})
} }
return r.runScript(op, clearServerStateCmd, []string{skey, wkey}) return r.runScript(ctx, op, clearServerStateCmd, []string{skey, wkey})
} }
// KEYS[1] -> asynq:schedulers:{<schedulerID>} // KEYS[1] -> asynq:schedulers:{<schedulerID>}
@@ -962,6 +1028,7 @@ return redis.status_reply("OK")`)
// WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl. // WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl.
func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error { func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
var op errors.Op = "rdb.WriteSchedulerEntries" var op errors.Op = "rdb.WriteSchedulerEntries"
ctx := context.Background()
args := []interface{}{ttl.Seconds()} args := []interface{}{ttl.Seconds()}
for _, e := range entries { for _, e := range entries {
bytes, err := base.EncodeSchedulerEntry(e) bytes, err := base.EncodeSchedulerEntry(e)
@@ -970,23 +1037,24 @@ func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.Schedule
} }
args = append(args, bytes) args = append(args, bytes)
} }
exp := time.Now().Add(ttl).UTC() exp := r.clock.Now().Add(ttl).UTC()
key := base.SchedulerEntriesKey(schedulerID) key := base.SchedulerEntriesKey(schedulerID)
err := r.client.ZAdd(context.Background(), base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err() err := r.client.ZAdd(ctx, base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
if err != nil { if err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zadd", Err: err})
} }
return r.runScript(op, writeSchedulerEntriesCmd, []string{key}, args...) return r.runScript(ctx, op, writeSchedulerEntriesCmd, []string{key}, args...)
} }
// ClearSchedulerEntries deletes scheduler entries data from redis. // ClearSchedulerEntries deletes scheduler entries data from redis.
func (r *RDB) ClearSchedulerEntries(scheduelrID string) error { func (r *RDB) ClearSchedulerEntries(scheduelrID string) error {
var op errors.Op = "rdb.ClearSchedulerEntries" var op errors.Op = "rdb.ClearSchedulerEntries"
ctx := context.Background()
key := base.SchedulerEntriesKey(scheduelrID) key := base.SchedulerEntriesKey(scheduelrID)
if err := r.client.ZRem(context.Background(), base.AllSchedulers, key).Err(); err != nil { if err := r.client.ZRem(ctx, base.AllSchedulers, key).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zrem", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "zrem", Err: err})
} }
if err := r.client.Del(context.Background(), key).Err(); err != nil { if err := r.client.Del(ctx, key).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err})
} }
return nil return nil
@@ -995,8 +1063,9 @@ func (r *RDB) ClearSchedulerEntries(scheduelrID string) error {
// CancelationPubSub returns a pubsub for cancelation messages. // CancelationPubSub returns a pubsub for cancelation messages.
func (r *RDB) CancelationPubSub() (*redis.PubSub, error) { func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
var op errors.Op = "rdb.CancelationPubSub" var op errors.Op = "rdb.CancelationPubSub"
pubsub := r.client.Subscribe(context.Background(), base.CancelChannel) ctx := context.Background()
_, err := pubsub.Receive(context.Background()) pubsub := r.client.Subscribe(ctx, base.CancelChannel)
_, err := pubsub.Receive(ctx)
if err != nil { if err != nil {
return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub receive error: %v", err)) return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub receive error: %v", err))
} }
@@ -1007,7 +1076,8 @@ func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
// The message is the ID for the task to be canceled. // The message is the ID for the task to be canceled.
func (r *RDB) PublishCancelation(id string) error { func (r *RDB) PublishCancelation(id string) error {
var op errors.Op = "rdb.PublishCancelation" var op errors.Op = "rdb.PublishCancelation"
if err := r.client.Publish(context.Background(), base.CancelChannel, id).Err(); err != nil { ctx := context.Background()
if err := r.client.Publish(ctx, base.CancelChannel, id).Err(); err != nil {
return errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub publish error: %v", err)) return errors.E(op, errors.Unknown, fmt.Sprintf("redis pubsub publish error: %v", err))
} }
return nil return nil
@@ -1028,6 +1098,7 @@ const maxEvents = 1000
// RecordSchedulerEnqueueEvent records the time when the given task was enqueued. // RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error { func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
var op errors.Op = "rdb.RecordSchedulerEnqueueEvent" var op errors.Op = "rdb.RecordSchedulerEnqueueEvent"
ctx := context.Background()
data, err := base.EncodeSchedulerEnqueueEvent(event) data, err := base.EncodeSchedulerEnqueueEvent(event)
if err != nil { if err != nil {
return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode scheduler enqueue event: %v", err)) return errors.E(op, errors.Internal, fmt.Sprintf("cannot encode scheduler enqueue event: %v", err))
@@ -1040,14 +1111,15 @@ func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerE
data, data,
maxEvents, maxEvents,
} }
return r.runScript(op, recordSchedulerEnqueueEventCmd, keys, argv...) return r.runScript(ctx, op, recordSchedulerEnqueueEventCmd, keys, argv...)
} }
// ClearSchedulerHistory deletes the enqueue event history for the given scheduler entry. // ClearSchedulerHistory deletes the enqueue event history for the given scheduler entry.
func (r *RDB) ClearSchedulerHistory(entryID string) error { func (r *RDB) ClearSchedulerHistory(entryID string) error {
var op errors.Op = "rdb.ClearSchedulerHistory" var op errors.Op = "rdb.ClearSchedulerHistory"
ctx := context.Background()
key := base.SchedulerHistoryKey(entryID) key := base.SchedulerHistoryKey(entryID)
if err := r.client.Del(context.Background(), key).Err(); err != nil { if err := r.client.Del(ctx, key).Err(); err != nil {
return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err}) return errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "del", Err: err})
} }
return nil return nil
@@ -1056,8 +1128,9 @@ func (r *RDB) ClearSchedulerHistory(entryID string) error {
// WriteResult writes the given result data for the specified task. // WriteResult writes the given result data for the specified task.
func (r *RDB) WriteResult(qname, taskID string, data []byte) (int, error) { func (r *RDB) WriteResult(qname, taskID string, data []byte) (int, error) {
var op errors.Op = "rdb.WriteResult" var op errors.Op = "rdb.WriteResult"
ctx := context.Background()
taskKey := base.TaskKey(qname, taskID) taskKey := base.TaskKey(qname, taskID)
if err := r.client.HSet(context.Background(), taskKey, "result", data).Err(); err != nil { if err := r.client.HSet(ctx, taskKey, "result", data).Err(); err != nil {
return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "hset", Err: err}) return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "hset", Err: err})
} }
return len(data), nil return len(data), nil

File diff suppressed because it is too large Load Diff

View File

@@ -6,6 +6,7 @@
package testbroker package testbroker
import ( import (
"context"
"errors" "errors"
"sync" "sync"
"time" "time"
@@ -45,22 +46,22 @@ func (tb *TestBroker) Wakeup() {
tb.sleeping = false tb.sleeping = false
} }
func (tb *TestBroker) Enqueue(msg *base.TaskMessage) error { func (tb *TestBroker) Enqueue(ctx context.Context, msg *base.TaskMessage) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Enqueue(msg) return tb.real.Enqueue(ctx, msg)
} }
func (tb *TestBroker) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error { func (tb *TestBroker) EnqueueUnique(ctx context.Context, msg *base.TaskMessage, ttl time.Duration) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.EnqueueUnique(msg, ttl) return tb.real.EnqueueUnique(ctx, msg, ttl)
} }
func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, error) { func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, error) {
@@ -72,67 +73,67 @@ func (tb *TestBroker) Dequeue(qnames ...string) (*base.TaskMessage, time.Time, e
return tb.real.Dequeue(qnames...) return tb.real.Dequeue(qnames...)
} }
func (tb *TestBroker) Done(msg *base.TaskMessage) error { func (tb *TestBroker) Done(ctx context.Context, msg *base.TaskMessage) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Done(msg) return tb.real.Done(ctx, msg)
} }
func (tb *TestBroker) MarkAsComplete(msg *base.TaskMessage) error { func (tb *TestBroker) MarkAsComplete(ctx context.Context, msg *base.TaskMessage) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.MarkAsComplete(msg) return tb.real.MarkAsComplete(ctx, msg)
} }
func (tb *TestBroker) Requeue(msg *base.TaskMessage) error { func (tb *TestBroker) Requeue(ctx context.Context, msg *base.TaskMessage) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Requeue(msg) return tb.real.Requeue(ctx, msg)
} }
func (tb *TestBroker) Schedule(msg *base.TaskMessage, processAt time.Time) error { func (tb *TestBroker) Schedule(ctx context.Context, msg *base.TaskMessage, processAt time.Time) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Schedule(msg, processAt) return tb.real.Schedule(ctx, msg, processAt)
} }
func (tb *TestBroker) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error { func (tb *TestBroker) ScheduleUnique(ctx context.Context, msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.ScheduleUnique(msg, processAt, ttl) return tb.real.ScheduleUnique(ctx, msg, processAt, ttl)
} }
func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error { func (tb *TestBroker) Retry(ctx context.Context, msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Retry(msg, processAt, errMsg, isFailure) return tb.real.Retry(ctx, msg, processAt, errMsg, isFailure)
} }
func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error { func (tb *TestBroker) Archive(ctx context.Context, msg *base.TaskMessage, errMsg string) error {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return errRedisDown return errRedisDown
} }
return tb.real.Archive(msg, errMsg) return tb.real.Archive(ctx, msg, errMsg)
} }
func (tb *TestBroker) ForwardIfReady(qnames ...string) error { func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
@@ -153,13 +154,22 @@ func (tb *TestBroker) DeleteExpiredCompletedTasks(qname string) error {
return tb.real.DeleteExpiredCompletedTasks(qname) return tb.real.DeleteExpiredCompletedTasks(qname)
} }
func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) { func (tb *TestBroker) ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*base.TaskMessage, error) {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
if tb.sleeping { if tb.sleeping {
return nil, errRedisDown return nil, errRedisDown
} }
return tb.real.ListDeadlineExceeded(deadline, qnames...) return tb.real.ListLeaseExpired(cutoff, qnames...)
}
func (tb *TestBroker) ExtendLease(qname string, ids ...string) (time.Time, error) {
tb.mu.Lock()
defer tb.mu.Unlock()
if tb.sleeping {
return time.Time{}, errRedisDown
}
return tb.real.ExtendLease(qname, ids...)
} }
func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error { func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {


@@ -0,0 +1,59 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Package timeutil exports functions and types related to time and date.
package timeutil
import (
"sync"
"time"
)
// A Clock is an object that can tell you the current time.
//
// This interface allows decoupling code that uses time from the code that creates
// a point in time. You can use this to your advantage by injecting Clocks into interfaces
// rather than having implementations call time.Now() directly.
//
// Use NewRealClock() in production.
// Use NewSimulatedClock() in tests.
type Clock interface {
Now() time.Time
}
func NewRealClock() Clock { return &realTimeClock{} }
type realTimeClock struct{}
func (_ *realTimeClock) Now() time.Time { return time.Now() }
// A SimulatedClock is a concrete Clock implementation that doesn't "tick" on its own.
// Time is advanced by explicit call to the AdvanceTime() or SetTime() functions.
// This object is concurrency safe.
type SimulatedClock struct {
mu sync.Mutex
t time.Time // guarded by mu
}
func NewSimulatedClock(t time.Time) *SimulatedClock {
return &SimulatedClock{t: t}
}
func (c *SimulatedClock) Now() time.Time {
c.mu.Lock()
defer c.mu.Unlock()
return c.t
}
func (c *SimulatedClock) SetTime(t time.Time) {
c.mu.Lock()
defer c.mu.Unlock()
c.t = t
}
func (c *SimulatedClock) AdvanceTime(d time.Duration) {
c.mu.Lock()
defer c.mu.Unlock()
c.t = c.t.Add(d)
}
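To make the Clock indirection above concrete, here is a minimal sketch (not part of this diff) of a test that injects a SimulatedClock. The expiryChecker type is hypothetical, and since timeutil is an internal package this would only compile as a test inside the asynq module:

package timeutil_test

import (
	"testing"
	"time"

	"github.com/hibiken/asynq/internal/timeutil"
)

// expiryChecker is a hypothetical component that asks a Clock for the
// current time instead of calling time.Now() directly.
type expiryChecker struct {
	clock timeutil.Clock
}

func (c *expiryChecker) expired(deadline time.Time) bool {
	return c.clock.Now().After(deadline)
}

func TestExpiryChecker(t *testing.T) {
	now := time.Now()
	clk := timeutil.NewSimulatedClock(now)
	checker := &expiryChecker{clock: clk}
	deadline := now.Add(time.Minute)

	if checker.expired(deadline) {
		t.Error("deadline should not have passed yet")
	}
	// Advance simulated time past the deadline; no real waiting needed.
	clk.AdvanceTime(2 * time.Minute)
	if !checker.expired(deadline) {
		t.Error("deadline should have passed after advancing the clock")
	}
}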


@@ -0,0 +1,48 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package timeutil
import (
"testing"
"time"
)
func TestSimulatedClock(t *testing.T) {
now := time.Now()
tests := []struct {
desc string
initTime time.Time
advanceBy time.Duration
wantTime time.Time
}{
{
desc: "advance time forward",
initTime: now,
advanceBy: 30 * time.Second,
wantTime: now.Add(30 * time.Second),
},
{
desc: "advance time backward",
initTime: now,
advanceBy: -10 * time.Second,
wantTime: now.Add(-10 * time.Second),
},
}
for _, tc := range tests {
c := NewSimulatedClock(tc.initTime)
if c.Now() != tc.initTime {
t.Errorf("%s: Before Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.initTime)
}
c.AdvanceTime(tc.advanceBy)
if c.Now() != tc.wantTime {
t.Errorf("%s: After Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.wantTime)
}
}
}


@@ -74,7 +74,7 @@ func (j *janitor) start(wg *sync.WaitGroup) {
func (j *janitor) exec() { func (j *janitor) exec() {
for _, qname := range j.queues { for _, qname := range j.queues {
if err := j.broker.DeleteExpiredCompletedTasks(qname); err != nil { if err := j.broker.DeleteExpiredCompletedTasks(qname); err != nil {
j.logger.Errorf("Could not delete expired completed tasks from queue %q: %v", j.logger.Errorf("Failed to delete expired completed tasks from queue %q: %v",
qname, err) qname, err)
} }
} }

periodic_task_manager.go (new file, 243 lines)

@@ -0,0 +1,243 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"crypto/sha256"
"fmt"
"io"
"sort"
"sync"
"time"
)
// PeriodicTaskManager manages scheduling of periodic tasks.
// It syncs scheduler's entries by calling the config provider periodically.
type PeriodicTaskManager struct {
s *Scheduler
p PeriodicTaskConfigProvider
syncInterval time.Duration
done chan (struct{})
wg sync.WaitGroup
m map[string]string // map[hash]entryID
}
type PeriodicTaskManagerOpts struct {
// Required: must be non-nil
PeriodicTaskConfigProvider PeriodicTaskConfigProvider
// Required: must be non-nil
RedisConnOpt RedisConnOpt
// Optional: scheduler options
*SchedulerOpts
// Optional: default is 3m
SyncInterval time.Duration
}
const defaultSyncInterval = 3 * time.Minute
// NewPeriodicTaskManager returns a new PeriodicTaskManager instance.
// The given opts should specify the RedisConnOpt and PeriodicTaskConfigProvider at minimum.
func NewPeriodicTaskManager(opts PeriodicTaskManagerOpts) (*PeriodicTaskManager, error) {
if opts.PeriodicTaskConfigProvider == nil {
return nil, fmt.Errorf("PeriodicTaskConfigProvider cannot be nil")
}
if opts.RedisConnOpt == nil {
return nil, fmt.Errorf("RedisConnOpt cannot be nil")
}
scheduler := NewScheduler(opts.RedisConnOpt, opts.SchedulerOpts)
syncInterval := opts.SyncInterval
if syncInterval == 0 {
syncInterval = defaultSyncInterval
}
return &PeriodicTaskManager{
s: scheduler,
p: opts.PeriodicTaskConfigProvider,
syncInterval: syncInterval,
done: make(chan struct{}),
m: make(map[string]string),
}, nil
}
// PeriodicTaskConfigProvider provides configs for periodic tasks.
// GetConfigs will be called by a PeriodicTaskManager periodically to
// sync the scheduler's entries with the configs returned by the provider.
type PeriodicTaskConfigProvider interface {
GetConfigs() ([]*PeriodicTaskConfig, error)
}
// PeriodicTaskConfig specifies the details of a periodic task.
type PeriodicTaskConfig struct {
Cronspec string // required: must be a non-empty string
Task *Task // required: must be non nil
Opts []Option // optional: can be nil
}
func (c *PeriodicTaskConfig) hash() string {
h := sha256.New()
io.WriteString(h, c.Cronspec)
io.WriteString(h, c.Task.Type())
h.Write(c.Task.Payload())
opts := stringifyOptions(c.Opts)
sort.Strings(opts)
for _, opt := range opts {
io.WriteString(h, opt)
}
return fmt.Sprintf("%x", h.Sum(nil))
}
func validatePeriodicTaskConfig(c *PeriodicTaskConfig) error {
if c == nil {
return fmt.Errorf("PeriodicTaskConfig cannot be nil")
}
if c.Task == nil {
return fmt.Errorf("PeriodicTaskConfig.Task cannot be nil")
}
if c.Cronspec == "" {
return fmt.Errorf("PeriodicTaskConfig.Cronspec cannot be empty")
}
return nil
}
// Start starts a scheduler and background goroutine to sync the scheduler with the configs
// returned by the provider.
//
// Start returns any error encountered during startup.
func (mgr *PeriodicTaskManager) Start() error {
if mgr.s == nil || mgr.p == nil {
panic("asynq: cannot start uninitialized PeriodicTaskManager; use NewPeriodicTaskManager to initialize")
}
if err := mgr.initialSync(); err != nil {
return fmt.Errorf("asynq: %v", err)
}
if err := mgr.s.Start(); err != nil {
return fmt.Errorf("asynq: %v", err)
}
mgr.wg.Add(1)
go func() {
defer mgr.wg.Done()
ticker := time.NewTicker(mgr.syncInterval)
for {
select {
case <-mgr.done:
mgr.s.logger.Debugf("Stopping syncer goroutine")
ticker.Stop()
return
case <-ticker.C:
mgr.sync()
}
}
}()
return nil
}
// Shutdown gracefully shuts down the manager.
// It notifies a background syncer goroutine to stop and stops the scheduler.
func (mgr *PeriodicTaskManager) Shutdown() {
close(mgr.done)
mgr.wg.Wait()
mgr.s.Shutdown()
}
// Run starts the manager and blocks until an OS signal to exit the program is received.
// Once it receives a signal, it gracefully shuts down the manager.
func (mgr *PeriodicTaskManager) Run() error {
if err := mgr.Start(); err != nil {
return err
}
mgr.s.waitForSignals()
mgr.Shutdown()
mgr.s.logger.Debugf("PeriodicTaskManager exiting")
return nil
}
func (mgr *PeriodicTaskManager) initialSync() error {
configs, err := mgr.p.GetConfigs()
if err != nil {
return fmt.Errorf("initial call to GetConfigs failed: %v", err)
}
for _, c := range configs {
if err := validatePeriodicTaskConfig(c); err != nil {
return fmt.Errorf("initial call to GetConfigs contained an invalid config: %v", err)
}
}
mgr.add(configs)
return nil
}
func (mgr *PeriodicTaskManager) add(configs []*PeriodicTaskConfig) {
for _, c := range configs {
entryID, err := mgr.s.Register(c.Cronspec, c.Task, c.Opts...)
if err != nil {
mgr.s.logger.Errorf("Failed to register periodic task: cronspec=%q task=%q",
c.Cronspec, c.Task.Type())
continue
}
mgr.m[c.hash()] = entryID
mgr.s.logger.Infof("Successfully registered periodic task: cronspec=%q task=%q, entryID=%s",
c.Cronspec, c.Task.Type(), entryID)
}
}
func (mgr *PeriodicTaskManager) remove(removed map[string]string) {
for hash, entryID := range removed {
if err := mgr.s.Unregister(entryID); err != nil {
mgr.s.logger.Errorf("Failed to unregister periodic task: %v", err)
continue
}
delete(mgr.m, hash)
mgr.s.logger.Infof("Successfully unregistered periodic task: entryID=%s", entryID)
}
}
func (mgr *PeriodicTaskManager) sync() {
configs, err := mgr.p.GetConfigs()
if err != nil {
mgr.s.logger.Errorf("Failed to get periodic task configs: %v", err)
return
}
for _, c := range configs {
if err := validatePeriodicTaskConfig(c); err != nil {
mgr.s.logger.Errorf("Failed to sync: GetConfigs returned an invalid config: %v", err)
return
}
}
// Diff and only register/unregister the newly added/removed entries.
removed := mgr.diffRemoved(configs)
added := mgr.diffAdded(configs)
mgr.remove(removed)
mgr.add(added)
}
// diffRemoved diffs the incoming configs with the registered configs and returns
// a map containing hash and entryID of each config that was removed.
func (mgr *PeriodicTaskManager) diffRemoved(configs []*PeriodicTaskConfig) map[string]string {
newConfigs := make(map[string]string)
for _, c := range configs {
newConfigs[c.hash()] = "" // empty value since we don't have entryID yet
}
removed := make(map[string]string)
for k, v := range mgr.m {
// test whether existing config is present in the incoming configs
if _, found := newConfigs[k]; !found {
removed[k] = v
}
}
return removed
}
// diffAdded diffs the incoming configs with the registered configs and returns
// a list of configs that were added.
func (mgr *PeriodicTaskManager) diffAdded(configs []*PeriodicTaskConfig) []*PeriodicTaskConfig {
var added []*PeriodicTaskConfig
for _, c := range configs {
if _, found := mgr.m[c.hash()]; !found {
added = append(added, c)
}
}
return added
}
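Taken together, the new API above can be driven as in this minimal sketch; the memoryProvider type and the Redis address are hypothetical stand-ins:

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// memoryProvider is a hypothetical PeriodicTaskConfigProvider that serves
// a fixed, in-memory set of configs. A real provider would typically read
// them from a database or a config service.
type memoryProvider struct {
	cfgs []*asynq.PeriodicTaskConfig
}

func (p *memoryProvider) GetConfigs() ([]*asynq.PeriodicTaskConfig, error) {
	return p.cfgs, nil
}

func main() {
	provider := &memoryProvider{
		cfgs: []*asynq.PeriodicTaskConfig{
			{Cronspec: "* * * * *", Task: asynq.NewTask("cleanup", nil)},
		},
	}
	mgr, err := asynq.NewPeriodicTaskManager(asynq.PeriodicTaskManagerOpts{
		RedisConnOpt:               asynq.RedisClientOpt{Addr: "localhost:6379"},
		PeriodicTaskConfigProvider: provider,
		SyncInterval:               30 * time.Second, // re-sync with the provider every 30s
	})
	if err != nil {
		log.Fatal(err)
	}
	// Run starts the scheduler plus the sync goroutine and blocks until
	// an exit signal is received.
	if err := mgr.Run(); err != nil {
		log.Fatal(err)
	}
}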


@@ -0,0 +1,343 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sort"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
)
// Trivial implementation of PeriodicTaskConfigProvider for testing purposes.
type FakeConfigProvider struct {
mu sync.Mutex
cfgs []*PeriodicTaskConfig
}
func (p *FakeConfigProvider) SetConfigs(cfgs []*PeriodicTaskConfig) {
p.mu.Lock()
defer p.mu.Unlock()
p.cfgs = cfgs
}
func (p *FakeConfigProvider) GetConfigs() ([]*PeriodicTaskConfig, error) {
p.mu.Lock()
defer p.mu.Unlock()
return p.cfgs, nil
}
func TestNewPeriodicTaskManager(t *testing.T) {
cfgs := []*PeriodicTaskConfig{
{Cronspec: "* * * * *", Task: NewTask("foo", nil)},
{Cronspec: "* * * * *", Task: NewTask("bar", nil)},
}
tests := []struct {
desc string
opts PeriodicTaskManagerOpts
}{
{
desc: "with provider and redisConnOpt",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
},
},
{
desc: "with sync option",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
SyncInterval: 5 * time.Minute,
},
},
{
desc: "with scheduler option",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
SyncInterval: 5 * time.Minute,
SchedulerOpts: &SchedulerOpts{
LogLevel: DebugLevel,
},
},
},
}
for _, tc := range tests {
_, err := NewPeriodicTaskManager(tc.opts)
if err != nil {
t.Errorf("%s; NewPeriodicTaskManager returned error: %v", tc.desc, err)
}
}
}
func TestNewPeriodicTaskManagerError(t *testing.T) {
cfgs := []*PeriodicTaskConfig{
{Cronspec: "* * * * *", Task: NewTask("foo", nil)},
{Cronspec: "* * * * *", Task: NewTask("bar", nil)},
}
tests := []struct {
desc string
opts PeriodicTaskManagerOpts
}{
{
desc: "without provider",
opts: PeriodicTaskManagerOpts{
RedisConnOpt: RedisClientOpt{Addr: ":6379"},
},
},
{
desc: "without redisConOpt",
opts: PeriodicTaskManagerOpts{
PeriodicTaskConfigProvider: &FakeConfigProvider{cfgs: cfgs},
},
},
}
for _, tc := range tests {
_, err := NewPeriodicTaskManager(tc.opts)
if err == nil {
t.Errorf("%s; NewPeriodicTaskManager did not return error", tc.desc)
}
}
}
func TestPeriodicTaskConfigHash(t *testing.T) {
tests := []struct {
desc string
a *PeriodicTaskConfig
b *PeriodicTaskConfig
isSame bool
}{
{
desc: "basic identity test",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
isSame: true,
},
{
desc: "with a option",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
isSame: true,
},
{
desc: "with multiple options (different order)",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Unique(5 * time.Minute), Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue"), Unique(5 * time.Minute)},
},
isSame: true,
},
{
desc: "with payload",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("hello world!")),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("hello world!")),
Opts: []Option{Queue("myqueue")},
},
isSame: true,
},
{
desc: "with different cronspecs",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
b: &PeriodicTaskConfig{
Cronspec: "5 * * * *",
Task: NewTask("foo", nil),
},
isSame: false,
},
{
desc: "with different task type",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("bar", nil),
},
isSame: false,
},
{
desc: "with different options",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Unique(10 * time.Minute)},
},
isSame: false,
},
{
desc: "with different options (one is subset of the other)",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", nil),
Opts: []Option{Queue("myqueue"), Unique(10 * time.Minute)},
},
isSame: false,
},
{
desc: "with different payload",
a: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("hello!")),
Opts: []Option{Queue("myqueue")},
},
b: &PeriodicTaskConfig{
Cronspec: "* * * * *",
Task: NewTask("foo", []byte("HELLO!")),
Opts: []Option{Queue("myqueue"), Unique(10 * time.Minute)},
},
isSame: false,
},
}
for _, tc := range tests {
if tc.isSame && tc.a.hash() != tc.b.hash() {
t.Errorf("%s: a.hash=%s b.hash=%s expected to be equal",
tc.desc, tc.a.hash(), tc.b.hash())
}
if !tc.isSame && tc.a.hash() == tc.b.hash() {
t.Errorf("%s: a.hash=%s b.hash=%s expected to be not equal",
tc.desc, tc.a.hash(), tc.b.hash())
}
}
}
// Things to test.
// - Run the manager
// - Change provider to return new configs
// - Verify that the scheduler synced with the new config
func TestPeriodicTaskManager(t *testing.T) {
// Note: In this test, we'll use task type as an ID for each config.
cfgs := []*PeriodicTaskConfig{
{Task: NewTask("task1", nil), Cronspec: "* * * * 1"},
{Task: NewTask("task2", nil), Cronspec: "* * * * 2"},
}
const syncInterval = 3 * time.Second
provider := &FakeConfigProvider{cfgs: cfgs}
mgr, err := NewPeriodicTaskManager(PeriodicTaskManagerOpts{
RedisConnOpt: getRedisConnOpt(t),
PeriodicTaskConfigProvider: provider,
SyncInterval: syncInterval,
})
if err != nil {
t.Fatalf("Failed to initialize PeriodicTaskManager: %v", err)
}
if err := mgr.Start(); err != nil {
t.Fatalf("Failed to start PeriodicTaskManager: %v", err)
}
defer mgr.Shutdown()
got := extractCronEntries(mgr.s)
want := []*cronEntry{
{Cronspec: "* * * * 1", TaskType: "task1"},
{Cronspec: "* * * * 2", TaskType: "task2"},
}
if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
t.Errorf("Diff found in scheduler's registered entries: %s", diff)
}
// Change the underlying configs
// - task2 removed
// - task3 added
provider.SetConfigs([]*PeriodicTaskConfig{
{Task: NewTask("task1", nil), Cronspec: "* * * * 1"},
{Task: NewTask("task3", nil), Cronspec: "* * * * 3"},
})
// Wait for the next sync
time.Sleep(syncInterval * 2)
// Verify the entries are synced
got = extractCronEntries(mgr.s)
want = []*cronEntry{
{Cronspec: "* * * * 1", TaskType: "task1"},
{Cronspec: "* * * * 3", TaskType: "task3"},
}
if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
t.Errorf("Diff found in scheduler's registered entries: %s", diff)
}
// Change the underlying configs
// All configs removed, empty set.
provider.SetConfigs([]*PeriodicTaskConfig{})
// Wait for the next sync
time.Sleep(syncInterval * 2)
// Verify the entries are synced
got = extractCronEntries(mgr.s)
want = []*cronEntry{}
if diff := cmp.Diff(want, got, sortCronEntry); diff != "" {
t.Errorf("Diff found in scheduler's registered entries: %s", diff)
}
}
func extractCronEntries(s *Scheduler) []*cronEntry {
var out []*cronEntry
for _, e := range s.cron.Entries() {
job := e.Job.(*enqueueJob)
out = append(out, &cronEntry{Cronspec: job.cronspec, TaskType: job.task.Type()})
}
return out
}
var sortCronEntry = cmp.Transformer("sortCronEntry", func(in []*cronEntry) []*cronEntry {
out := append([]*cronEntry(nil), in...)
sort.Slice(out, func(i, j int) bool {
return out[i].TaskType < out[j].TaskType
})
return out
})
// A simple struct to allow for simpler comparison in test.
type cronEntry struct {
Cronspec string
TaskType string
}


@@ -7,6 +7,7 @@ package asynq
import ( import (
"context" "context"
"fmt" "fmt"
"math"
"math/rand" "math/rand"
"runtime" "runtime"
"runtime/debug" "runtime/debug"
@@ -19,14 +20,17 @@ import (
asynqcontext "github.com/hibiken/asynq/internal/context" asynqcontext "github.com/hibiken/asynq/internal/context"
"github.com/hibiken/asynq/internal/errors" "github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/timeutil"
"golang.org/x/time/rate" "golang.org/x/time/rate"
) )
type processor struct { type processor struct {
logger *log.Logger logger *log.Logger
broker base.Broker broker base.Broker
clock timeutil.Clock
handler Handler handler Handler
baseCtxFn func() context.Context
queueConfig map[string]int queueConfig map[string]int
@@ -71,6 +75,7 @@ type processor struct {
type processorParams struct { type processorParams struct {
logger *log.Logger logger *log.Logger
broker base.Broker broker base.Broker
baseCtxFn func() context.Context
retryDelayFunc RetryDelayFunc retryDelayFunc RetryDelayFunc
isFailureFunc func(error) bool isFailureFunc func(error) bool
syncCh chan<- *syncRequest syncCh chan<- *syncRequest
@@ -94,6 +99,8 @@ func newProcessor(params processorParams) *processor {
return &processor{ return &processor{
logger: params.logger, logger: params.logger,
broker: params.broker, broker: params.broker,
baseCtxFn: params.baseCtxFn,
clock: timeutil.NewRealClock(),
queueConfig: queues, queueConfig: queues,
orderedQueues: orderedQueues, orderedQueues: orderedQueues,
retryDelayFunc: params.retryDelayFunc, retryDelayFunc: params.retryDelayFunc,
@@ -164,7 +171,7 @@ func (p *processor) exec() {
return return
case p.sema <- struct{}{}: // acquire token case p.sema <- struct{}{}: // acquire token
qnames := p.queues() qnames := p.queues()
msg, deadline, err := p.broker.Dequeue(qnames...) msg, leaseExpirationTime, err := p.broker.Dequeue(qnames...)
switch { switch {
case errors.Is(err, errors.ErrNoProcessableTask): case errors.Is(err, errors.ErrNoProcessableTask):
p.logger.Debug("All queues are empty") p.logger.Debug("All queues are empty")
@@ -183,14 +190,16 @@ func (p *processor) exec() {
return return
} }
p.starting <- &workerInfo{msg, time.Now(), deadline} lease := base.NewLease(leaseExpirationTime)
deadline := p.computeDeadline(msg)
p.starting <- &workerInfo{msg, time.Now(), deadline, lease}
go func() { go func() {
defer func() { defer func() {
p.finished <- msg p.finished <- msg
<-p.sema // release token <-p.sema // release token
}() }()
ctx, cancel := asynqcontext.New(msg, deadline) ctx, cancel := asynqcontext.New(p.baseCtxFn(), msg, deadline)
p.cancelations.Add(msg.ID, cancel) p.cancelations.Add(msg.ID, cancel)
defer func() { defer func() {
cancel() cancel()
@@ -201,7 +210,7 @@ func (p *processor) exec() {
select { select {
case <-ctx.Done(): case <-ctx.Done():
// already canceled (e.g. deadline exceeded). // already canceled (e.g. deadline exceeded).
p.handleFailedMessage(ctx, msg, ctx.Err()) p.handleFailedMessage(ctx, lease, msg, ctx.Err())
return return
default: default:
} }
@@ -225,24 +234,33 @@ func (p *processor) exec() {
case <-p.abort: case <-p.abort:
// time is up, push the message back to queue and quit this worker goroutine. // time is up, push the message back to queue and quit this worker goroutine.
p.logger.Warnf("Quitting worker. task id=%s", msg.ID) p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
p.requeue(msg) p.requeue(lease, msg)
return
case <-lease.Done():
cancel()
p.handleFailedMessage(ctx, lease, msg, ErrLeaseExpired)
return return
case <-ctx.Done(): case <-ctx.Done():
p.handleFailedMessage(ctx, msg, ctx.Err()) p.handleFailedMessage(ctx, lease, msg, ctx.Err())
return return
case resErr := <-resCh: case resErr := <-resCh:
if resErr != nil { if resErr != nil {
p.handleFailedMessage(ctx, msg, resErr) p.handleFailedMessage(ctx, lease, msg, resErr)
return return
} }
p.handleSucceededMessage(ctx, msg) p.handleSucceededMessage(lease, msg)
} }
}() }()
} }
} }
func (p *processor) requeue(msg *base.TaskMessage) { func (p *processor) requeue(l *base.Lease, msg *base.TaskMessage) {
err := p.broker.Requeue(msg) if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.Requeue(ctx, msg)
if err != nil { if err != nil {
p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err) p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err)
} else { } else {
@@ -250,49 +268,51 @@ func (p *processor) requeue(msg *base.TaskMessage) {
} }
} }
func (p *processor) handleSucceededMessage(ctx context.Context, msg *base.TaskMessage) { func (p *processor) handleSucceededMessage(l *base.Lease, msg *base.TaskMessage) {
if msg.Retention > 0 { if msg.Retention > 0 {
p.markAsComplete(ctx, msg) p.markAsComplete(l, msg)
} else { } else {
p.markAsDone(ctx, msg) p.markAsDone(l, msg)
} }
} }
func (p *processor) markAsComplete(ctx context.Context, msg *base.TaskMessage) { func (p *processor) markAsComplete(l *base.Lease, msg *base.TaskMessage) {
err := p.broker.MarkAsComplete(msg) if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.MarkAsComplete(ctx, msg)
if err != nil { if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s type=%q from %q to %q: %+v", errMsg := fmt.Sprintf("Could not move task id=%s type=%q from %q to %q: %+v",
msg.ID, msg.Type, base.ActiveKey(msg.Queue), base.CompletedKey(msg.Queue), err) msg.ID, msg.Type, base.ActiveKey(msg.Queue), base.CompletedKey(msg.Queue), err)
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
}
p.logger.Warnf("%s; Will retry syncing", errMsg) p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{ p.syncRequestCh <- &syncRequest{
fn: func() error { fn: func() error {
return p.broker.MarkAsComplete(msg) return p.broker.MarkAsComplete(ctx, msg)
}, },
errMsg: errMsg, errMsg: errMsg,
deadline: deadline, deadline: l.Deadline(),
} }
} }
} }
func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) { func (p *processor) markAsDone(l *base.Lease, msg *base.TaskMessage) {
err := p.broker.Done(msg) if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.Done(ctx, msg)
if err != nil { if err != nil {
errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err) errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
}
p.logger.Warnf("%s; Will retry syncing", errMsg) p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{ p.syncRequestCh <- &syncRequest{
fn: func() error { fn: func() error {
return p.broker.Done(msg) return p.broker.Done(ctx, msg)
}, },
errMsg: errMsg, errMsg: errMsg,
deadline: deadline, deadline: l.Deadline(),
} }
} }
} }
@@ -301,59 +321,61 @@ func (p *processor) markAsDone(ctx context.Context, msg *base.TaskMessage) {
// the task should not be retried and should be archived instead. // the task should not be retried and should be archived instead.
var SkipRetry = errors.New("skip retry for the task") var SkipRetry = errors.New("skip retry for the task")
func (p *processor) handleFailedMessage(ctx context.Context, msg *base.TaskMessage, err error) { func (p *processor) handleFailedMessage(ctx context.Context, l *base.Lease, msg *base.TaskMessage, err error) {
if p.errHandler != nil { if p.errHandler != nil {
p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err) p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
} }
if !p.isFailureFunc(err) { if !p.isFailureFunc(err) {
// retry the task without marking it as failed // retry the task without marking it as failed
p.retry(ctx, msg, err, false /*isFailure*/) p.retry(l, msg, err, false /*isFailure*/)
return return
} }
if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) { if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) {
p.logger.Warnf("Retry exhausted for task id=%s", msg.ID) p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
p.archive(ctx, msg, err) p.archive(l, msg, err)
} else { } else {
p.retry(ctx, msg, err, true /*isFailure*/) p.retry(l, msg, err, true /*isFailure*/)
} }
} }
func (p *processor) retry(ctx context.Context, msg *base.TaskMessage, e error, isFailure bool) { func (p *processor) retry(l *base.Lease, msg *base.TaskMessage, e error, isFailure bool) {
if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload)) d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
retryAt := time.Now().Add(d) retryAt := time.Now().Add(d)
err := p.broker.Retry(msg, retryAt, e.Error(), isFailure) err := p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
if err != nil { if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue)) errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
}
p.logger.Warnf("%s; Will retry syncing", errMsg) p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{ p.syncRequestCh <- &syncRequest{
fn: func() error { fn: func() error {
return p.broker.Retry(msg, retryAt, e.Error(), isFailure) return p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
}, },
errMsg: errMsg, errMsg: errMsg,
deadline: deadline, deadline: l.Deadline(),
} }
} }
} }
func (p *processor) archive(ctx context.Context, msg *base.TaskMessage, e error) { func (p *processor) archive(l *base.Lease, msg *base.TaskMessage, e error) {
err := p.broker.Archive(msg, e.Error()) if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.Archive(ctx, msg, e.Error())
if err != nil { if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue)) errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue))
deadline, ok := ctx.Deadline()
if !ok {
panic("asynq: internal error: missing deadline in context")
}
p.logger.Warnf("%s; Will retry syncing", errMsg) p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{ p.syncRequestCh <- &syncRequest{
fn: func() error { fn: func() error {
return p.broker.Archive(msg, e.Error()) return p.broker.Archive(ctx, msg, e.Error())
}, },
errMsg: errMsg, errMsg: errMsg,
deadline: deadline, deadline: l.Deadline(),
} }
} }
} }
@@ -483,3 +505,19 @@ func gcd(xs ...int) int {
} }
return res return res
} }
// computeDeadline returns the given task's deadline.
func (p *processor) computeDeadline(msg *base.TaskMessage) time.Time {
if msg.Timeout == 0 && msg.Deadline == 0 {
p.logger.Errorf("asynq: internal error: both timeout and deadline are not set for the task message: %s", msg.ID)
return p.clock.Now().Add(defaultTimeout)
}
if msg.Timeout != 0 && msg.Deadline != 0 {
deadlineUnix := math.Min(float64(p.clock.Now().Unix()+msg.Timeout), float64(msg.Deadline))
return time.Unix(int64(deadlineUnix), 0)
}
if msg.Timeout != 0 {
return p.clock.Now().Add(time.Duration(msg.Timeout) * time.Second)
}
return time.Unix(msg.Deadline, 0)
}
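The processor changes above lean on the new base.Lease type (NewLease, IsValid, Deadline, Done, Reset, NotifyExpiration). The sketch below is inferred from those call sites to show how such a lease could work; it is an illustration, not the actual internal/base implementation (which, among other things, uses an injectable clock rather than time.Now):

package base

import (
	"sync"
	"time"
)

// Lease is a sketch of a worker lease: it carries an expiration time and a
// channel that is closed once expiration has been observed.
type Lease struct {
	once sync.Once
	ch   chan struct{}

	mu       sync.Mutex
	expireAt time.Time // guarded by mu
}

func NewLease(expirationTime time.Time) *Lease {
	return &Lease{
		ch:       make(chan struct{}),
		expireAt: expirationTime,
	}
}

// Reset updates the expiration time (e.g. after a successful ExtendLease call)
// and reports whether the lease was still valid when reset.
func (l *Lease) Reset(expirationTime time.Time) bool {
	if !l.IsValid() {
		return false
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	l.expireAt = expirationTime
	return true
}

func (l *Lease) Deadline() time.Time {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.expireAt
}

func (l *Lease) IsValid() bool {
	return time.Now().Before(l.Deadline())
}

// NotifyExpiration closes the Done channel if the lease has expired and
// reports whether the notification was delivered.
func (l *Lease) NotifyExpiration() bool {
	if l.IsValid() {
		return false
	}
	l.once.Do(func() { close(l.ch) })
	return true
}

// Done returns a channel that is closed when the lease expires, mirroring
// the <-lease.Done() case in processor.exec above.
func (l *Lease) Done() <-chan struct{} {
	return l.ch
}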


@@ -17,7 +17,10 @@ import (
"github.com/google/go-cmp/cmp/cmpopts" "github.com/google/go-cmp/cmp/cmpopts"
h "github.com/hibiken/asynq/internal/asynqtest" h "github.com/hibiken/asynq/internal/asynqtest"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb" "github.com/hibiken/asynq/internal/rdb"
"github.com/hibiken/asynq/internal/timeutil"
) )
var taskCmpOpts = []cmp.Option{ var taskCmpOpts = []cmp.Option{
@@ -61,6 +64,7 @@ func newProcessorForTest(t *testing.T, r *rdb.RDB, h Handler) *processor {
p := newProcessor(processorParams{ p := newProcessor(processorParams{
logger: testLogger, logger: testLogger,
broker: r, broker: r,
baseCtxFn: context.Background,
retryDelayFunc: DefaultRetryDelayFunc, retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc, isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh, syncCh: syncCh,
@@ -126,7 +130,7 @@ func TestProcessorSuccessWithSingleQueue(t *testing.T) {
p.start(&sync.WaitGroup{}) p.start(&sync.WaitGroup{})
for _, msg := range tc.incoming { for _, msg := range tc.incoming {
err := rdbClient.Enqueue(msg) err := rdbClient.Enqueue(context.Background(), msg)
if err != nil { if err != nil {
p.shutdown() p.shutdown()
t.Fatal(err) t.Fatal(err)
@@ -480,6 +484,104 @@ func TestProcessorMarkAsComplete(t *testing.T) {
} }
} }
// Test a scenario where the worker server cannot communicate with redis due to a network failure
// and the lease expires
func TestProcessorWithExpiredLease(t *testing.T) {
r := setup(t)
defer r.Close()
rdbClient := rdb.NewRDB(r)
m1 := h.NewTaskMessage("task1", nil)
tests := []struct {
pending []*base.TaskMessage
handler Handler
wantErrCount int
}{
{
pending: []*base.TaskMessage{m1},
handler: HandlerFunc(func(ctx context.Context, task *Task) error {
// make sure the task processing time exceeds lease duration
// to test expired lease.
time.Sleep(rdb.LeaseDuration + 10*time.Second)
return nil
}),
wantErrCount: 1, // ErrorHandler should still be called with ErrLeaseExpired
},
}
for _, tc := range tests {
h.FlushDB(t, r)
h.SeedPendingQueue(t, r, tc.pending, base.DefaultQueueName)
starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
done := make(chan struct{})
t.Cleanup(func() { close(done) })
// fake heartbeater which notifies lease expiration
go func() {
for {
select {
case w := <-starting:
// simulate expiration by resetting to some time in the past
w.lease.Reset(time.Now().Add(-5 * time.Second))
if !w.lease.NotifyExpiration() {
panic("Failed to notifiy lease expiration")
}
case <-finished:
// do nothing
case <-done:
return
}
}
}()
go fakeSyncer(syncCh, done)
p := newProcessor(processorParams{
logger: testLogger,
broker: rdbClient,
baseCtxFn: context.Background,
retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh,
cancelations: base.NewCancelations(),
concurrency: 10,
queues: defaultQueueConfig,
strictPriority: false,
errHandler: nil,
shutdownTimeout: defaultShutdownTimeout,
starting: starting,
finished: finished,
})
p.handler = tc.handler
var (
mu sync.Mutex // guards n and errs
n int // number of times error handler is called
errs []error // error passed to error handler
)
p.errHandler = ErrorHandlerFunc(func(ctx context.Context, t *Task, err error) {
mu.Lock()
defer mu.Unlock()
n++
errs = append(errs, err)
})
p.start(&sync.WaitGroup{})
time.Sleep(4 * time.Second)
p.shutdown()
if n != tc.wantErrCount {
t.Errorf("Unexpected number of error count: got %d, want %d", n, tc.wantErrCount)
continue
}
for i := 0; i < tc.wantErrCount; i++ {
if !errors.Is(errs[i], ErrLeaseExpired) {
t.Errorf("Unexpected error was passed to ErrorHandler: got %v want %v", errs[i], ErrLeaseExpired)
}
}
}
}
func TestProcessorQueues(t *testing.T) { func TestProcessorQueues(t *testing.T) {
sortOpt := cmp.Transformer("SortStrings", func(in []string) []string { sortOpt := cmp.Transformer("SortStrings", func(in []string) []string {
out := append([]string(nil), in...) // Copy input to avoid mutating it out := append([]string(nil), in...) // Copy input to avoid mutating it
@@ -592,6 +694,7 @@ func TestProcessorWithStrictPriority(t *testing.T) {
p := newProcessor(processorParams{ p := newProcessor(processorParams{
logger: testLogger, logger: testLogger,
broker: rdbClient, broker: rdbClient,
baseCtxFn: context.Background,
retryDelayFunc: DefaultRetryDelayFunc, retryDelayFunc: DefaultRetryDelayFunc,
isFailureFunc: defaultIsFailureFunc, isFailureFunc: defaultIsFailureFunc,
syncCh: syncCh, syncCh: syncCh,
@@ -752,3 +855,69 @@ func TestNormalizeQueues(t *testing.T) {
} }
} }
} }
func TestProcessorComputeDeadline(t *testing.T) {
now := time.Now()
p := processor{
logger: log.NewLogger(nil),
clock: timeutil.NewSimulatedClock(now),
}
tests := []struct {
desc string
msg *base.TaskMessage
want time.Time
}{
{
desc: "message with only timeout specified",
msg: &base.TaskMessage{
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(30 * time.Minute),
},
{
desc: "message with only deadline specified",
msg: &base.TaskMessage{
Deadline: now.Add(24 * time.Hour).Unix(),
},
want: now.Add(24 * time.Hour),
},
{
desc: "message with both timeout and deadline set (now+timeout < deadline)",
msg: &base.TaskMessage{
Deadline: now.Add(24 * time.Hour).Unix(),
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(30 * time.Minute),
},
{
desc: "message with both timeout and deadline set (now+timeout > deadline)",
msg: &base.TaskMessage{
Deadline: now.Add(10 * time.Minute).Unix(),
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(10 * time.Minute),
},
{
desc: "message with both timeout and deadline set (now+timeout == deadline)",
msg: &base.TaskMessage{
Deadline: now.Add(30 * time.Minute).Unix(),
Timeout: int64((30 * time.Minute).Seconds()),
},
want: now.Add(30 * time.Minute),
},
{
desc: "message without timeout and deadline",
msg: &base.TaskMessage{},
want: now.Add(defaultTimeout),
},
}
for _, tc := range tests {
got := p.computeDeadline(tc.msg)
// Compare the Unix epoch with seconds granularity
if got.Unix() != tc.want.Unix() {
t.Errorf("%s: got=%v, want=%v", tc.desc, got.Unix(), tc.want.Unix())
}
}
}


@@ -10,6 +10,7 @@ import (
"time" "time"
"github.com/hibiken/asynq/internal/base" "github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log" "github.com/hibiken/asynq/internal/log"
) )
@@ -76,19 +77,23 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
}() }()
} }
// ErrLeaseExpired error indicates that the task failed because the worker working on the task
// could not extend its lease due to missing heartbeats. The worker may have crashed or got cut off from the network.
var ErrLeaseExpired = errors.New("asynq: task lease expired")
func (r *recoverer) recover() { func (r *recoverer) recover() {
// Get all tasks which have expired 30 seconds ago or earlier. // Get all tasks which have expired 30 seconds ago or earlier to accommodate a certain amount of clock skew.
deadline := time.Now().Add(-30 * time.Second) cutoff := time.Now().Add(-30 * time.Second)
msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...) msgs, err := r.broker.ListLeaseExpired(cutoff, r.queues...)
if err != nil { if err != nil {
r.logger.Warn("recoverer: could not list deadline exceeded tasks") r.logger.Warn("recoverer: could not list lease expired tasks")
return return
} }
for _, msg := range msgs { for _, msg := range msgs {
if msg.Retried >= msg.Retry { if msg.Retried >= msg.Retry {
r.archive(msg, context.DeadlineExceeded) r.archive(msg, ErrLeaseExpired)
} else { } else {
r.retry(msg, context.DeadlineExceeded) r.retry(msg, ErrLeaseExpired)
} }
} }
} }
@@ -96,13 +101,13 @@ func (r *recoverer) recover() {
func (r *recoverer) retry(msg *base.TaskMessage, err error) { func (r *recoverer) retry(msg *base.TaskMessage, err error) {
delay := r.retryDelayFunc(msg.Retried, err, NewTask(msg.Type, msg.Payload)) delay := r.retryDelayFunc(msg.Retried, err, NewTask(msg.Type, msg.Payload))
retryAt := time.Now().Add(delay) retryAt := time.Now().Add(delay)
if err := r.broker.Retry(msg, retryAt, err.Error(), r.isFailureFunc(err)); err != nil { if err := r.broker.Retry(context.Background(), msg, retryAt, err.Error(), r.isFailureFunc(err)); err != nil {
r.logger.Warnf("recoverer: could not retry deadline exceeded task: %v", err) r.logger.Warnf("recoverer: could not retry lease expired task: %v", err)
} }
} }
func (r *recoverer) archive(msg *base.TaskMessage, err error) { func (r *recoverer) archive(msg *base.TaskMessage, err error) {
if err := r.broker.Archive(msg, err.Error()); err != nil { if err := r.broker.Archive(context.Background(), msg, err.Error()); err != nil {
r.logger.Warnf("recoverer: could not move task to archive: %v", err) r.logger.Warnf("recoverer: could not move task to archive: %v", err)
} }
} }
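Since ErrLeaseExpired is exported from the asynq package, applications can distinguish lease expirations from ordinary handler failures in their ErrorHandler. A small sketch (the Redis address is a placeholder):

package main

import (
	"context"
	"errors"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "localhost:6379"}, asynq.Config{
		ErrorHandler: asynq.ErrorHandlerFunc(func(ctx context.Context, task *asynq.Task, err error) {
			if errors.Is(err, asynq.ErrLeaseExpired) {
				// The worker lost its lease (e.g. Redis was unreachable);
				// the recoverer will retry or archive the task.
				log.Printf("lease expired while processing task %q", task.Type())
				return
			}
			log.Printf("task %q failed: %v", task.Type(), err)
		}),
	})
	_ = srv // in a real program: srv.Run(yourHandler)
}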


@@ -27,29 +27,25 @@ func TestRecoverer(t *testing.T) {
t4.Retried = t4.Retry // t4 has reached its max retry count t4.Retried = t4.Retry // t4 has reached its max retry count
now := time.Now() now := time.Now()
oneHourFromNow := now.Add(1 * time.Hour)
fiveMinutesFromNow := now.Add(5 * time.Minute)
fiveMinutesAgo := now.Add(-5 * time.Minute)
oneHourAgo := now.Add(-1 * time.Hour)
tests := []struct { tests := []struct {
desc string desc string
inProgress map[string][]*base.TaskMessage active map[string][]*base.TaskMessage
deadlines map[string][]base.Z lease map[string][]base.Z
retry map[string][]base.Z retry map[string][]base.Z
archived map[string][]base.Z archived map[string][]base.Z
wantActive map[string][]*base.TaskMessage wantActive map[string][]*base.TaskMessage
wantDeadlines map[string][]base.Z wantLease map[string][]base.Z
wantRetry map[string][]*base.TaskMessage wantRetry map[string][]*base.TaskMessage
wantArchived map[string][]*base.TaskMessage wantArchived map[string][]*base.TaskMessage
}{ }{
{ {
desc: "with one active task", desc: "with one active task",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {t1}, "default": {t1},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": {{Message: t1, Score: fiveMinutesAgo.Unix()}}, "default": {{Message: t1, Score: now.Add(-1 * time.Minute).Unix()}},
}, },
retry: map[string][]base.Z{ retry: map[string][]base.Z{
"default": {}, "default": {},
@@ -60,7 +56,7 @@ func TestRecoverer(t *testing.T) {
wantActive: map[string][]*base.TaskMessage{ wantActive: map[string][]*base.TaskMessage{
"default": {}, "default": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {}, "default": {},
}, },
wantRetry: map[string][]*base.TaskMessage{ wantRetry: map[string][]*base.TaskMessage{
@@ -72,12 +68,12 @@ func TestRecoverer(t *testing.T) {
}, },
{ {
desc: "with a task with max-retry reached", desc: "with a task with max-retry reached",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {t4}, "default": {t4},
"critical": {}, "critical": {},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": {{Message: t4, Score: fiveMinutesAgo.Unix()}}, "default": {{Message: t4, Score: now.Add(-40 * time.Second).Unix()}},
"critical": {}, "critical": {},
}, },
retry: map[string][]base.Z{ retry: map[string][]base.Z{
@@ -92,7 +88,7 @@ func TestRecoverer(t *testing.T) {
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
@@ -107,17 +103,17 @@ func TestRecoverer(t *testing.T) {
}, },
{ {
desc: "with multiple active tasks, and one expired", desc: "with multiple active tasks, and one expired",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {t1, t2}, "default": {t1, t2},
"critical": {t3}, "critical": {t3},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": { "default": {
{Message: t1, Score: oneHourAgo.Unix()}, {Message: t1, Score: now.Add(-2 * time.Minute).Unix()},
{Message: t2, Score: fiveMinutesFromNow.Unix()}, {Message: t2, Score: now.Add(20 * time.Second).Unix()},
}, },
"critical": { "critical": {
{Message: t3, Score: oneHourFromNow.Unix()}, {Message: t3, Score: now.Add(20 * time.Second).Unix()},
}, },
}, },
retry: map[string][]base.Z{ retry: map[string][]base.Z{
@@ -132,9 +128,9 @@ func TestRecoverer(t *testing.T) {
"default": {t2}, "default": {t2},
"critical": {t3}, "critical": {t3},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {{Message: t2, Score: fiveMinutesFromNow.Unix()}}, "default": {{Message: t2, Score: now.Add(20 * time.Second).Unix()}},
"critical": {{Message: t3, Score: oneHourFromNow.Unix()}}, "critical": {{Message: t3, Score: now.Add(20 * time.Second).Unix()}},
}, },
wantRetry: map[string][]*base.TaskMessage{ wantRetry: map[string][]*base.TaskMessage{
"default": {t1}, "default": {t1},
@@ -147,17 +143,17 @@ func TestRecoverer(t *testing.T) {
}, },
{ {
desc: "with multiple expired active tasks", desc: "with multiple expired active tasks",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {t1, t2}, "default": {t1, t2},
"critical": {t3}, "critical": {t3},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": { "default": {
{Message: t1, Score: oneHourAgo.Unix()}, {Message: t1, Score: now.Add(-1 * time.Minute).Unix()},
{Message: t2, Score: oneHourFromNow.Unix()}, {Message: t2, Score: now.Add(10 * time.Second).Unix()},
}, },
"critical": { "critical": {
{Message: t3, Score: fiveMinutesAgo.Unix()}, {Message: t3, Score: now.Add(-1 * time.Minute).Unix()},
}, },
}, },
retry: map[string][]base.Z{ retry: map[string][]base.Z{
@@ -172,8 +168,8 @@ func TestRecoverer(t *testing.T) {
"default": {t2}, "default": {t2},
"critical": {}, "critical": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {{Message: t2, Score: oneHourFromNow.Unix()}}, "default": {{Message: t2, Score: now.Add(10 * time.Second).Unix()}},
}, },
wantRetry: map[string][]*base.TaskMessage{ wantRetry: map[string][]*base.TaskMessage{
"default": {t1}, "default": {t1},
@@ -186,11 +182,11 @@ func TestRecoverer(t *testing.T) {
}, },
{ {
desc: "with empty active queue", desc: "with empty active queue",
inProgress: map[string][]*base.TaskMessage{ active: map[string][]*base.TaskMessage{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
deadlines: map[string][]base.Z{ lease: map[string][]base.Z{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
@@ -206,7 +202,7 @@ func TestRecoverer(t *testing.T) {
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
wantDeadlines: map[string][]base.Z{ wantLease: map[string][]base.Z{
"default": {}, "default": {},
"critical": {}, "critical": {},
}, },
@@ -223,8 +219,8 @@ func TestRecoverer(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
h.FlushDB(t, r) h.FlushDB(t, r)
h.SeedAllActiveQueues(t, r, tc.inProgress) h.SeedAllActiveQueues(t, r, tc.active)
h.SeedAllDeadlines(t, r, tc.deadlines) h.SeedAllLease(t, r, tc.lease)
h.SeedAllRetryQueues(t, r, tc.retry) h.SeedAllRetryQueues(t, r, tc.retry)
h.SeedAllArchivedQueues(t, r, tc.archived) h.SeedAllArchivedQueues(t, r, tc.archived)
@@ -249,10 +245,10 @@ func TestRecoverer(t *testing.T) {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff) t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.ActiveKey(qname), diff)
} }
} }
for qname, want := range tc.wantDeadlines { for qname, want := range tc.wantLease {
gotDeadlines := h.GetDeadlinesEntries(t, r, qname) gotLease := h.GetLeaseEntries(t, r, qname)
if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" { if diff := cmp.Diff(want, gotLease, h.SortZSetEntryOpt); diff != "" {
t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.DeadlinesKey(qname), diff) t.Errorf("%s; mismatch found in %q; (-want,+got)\n%s", tc.desc, base.LeaseKey(qname), diff)
} }
} }
cmpOpt := h.EquateInt64Approx(2) // allow up to two-second difference in `LastFailedAt` cmpOpt := h.EquateInt64Approx(2) // allow up to two-second difference in `LastFailedAt`
@@ -260,7 +256,7 @@ func TestRecoverer(t *testing.T) {
gotRetry := h.GetRetryMessages(t, r, qname) gotRetry := h.GetRetryMessages(t, r, qname)
var wantRetry []*base.TaskMessage // Note: construct message here since `LastFailedAt` is relative to each test run var wantRetry []*base.TaskMessage // Note: construct message here since `LastFailedAt` is relative to each test run
for _, msg := range msgs { for _, msg := range msgs {
wantRetry = append(wantRetry, h.TaskMessageAfterRetry(*msg, "context deadline exceeded", runTime)) wantRetry = append(wantRetry, h.TaskMessageAfterRetry(*msg, ErrLeaseExpired.Error(), runTime))
} }
if diff := cmp.Diff(wantRetry, gotRetry, h.SortMsgOpt, cmpOpt); diff != "" { if diff := cmp.Diff(wantRetry, gotRetry, h.SortMsgOpt, cmpOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff) t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.RetryKey(qname), diff)
@@ -270,7 +266,7 @@ func TestRecoverer(t *testing.T) {
gotArchived := h.GetArchivedMessages(t, r, qname) gotArchived := h.GetArchivedMessages(t, r, qname)
var wantArchived []*base.TaskMessage var wantArchived []*base.TaskMessage
for _, msg := range msgs { for _, msg := range msgs {
wantArchived = append(wantArchived, h.TaskMessageWithError(*msg, "context deadline exceeded", runTime)) wantArchived = append(wantArchived, h.TaskMessageWithError(*msg, ErrLeaseExpired.Error(), runTime))
} }
if diff := cmp.Diff(wantArchived, gotArchived, h.SortMsgOpt, cmpOpt); diff != "" { if diff := cmp.Diff(wantArchived, gotArchived, h.SortMsgOpt, cmpOpt); diff != "" {
t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff) t.Errorf("%s; mismatch found in %q: (-want, +got)\n%s", tc.desc, base.ArchivedKey(qname), diff)


@@ -23,7 +23,9 @@ import (
// Schedulers are safe for concurrent use by multiple goroutines. // Schedulers are safe for concurrent use by multiple goroutines.
type Scheduler struct { type Scheduler struct {
id string id string
state *base.ServerState
state *serverState
logger *log.Logger logger *log.Logger
client *Client client *Client
rdb *rdb.RDB rdb *rdb.RDB
@@ -66,7 +68,7 @@ func NewScheduler(r RedisConnOpt, opts *SchedulerOpts) *Scheduler {
return &Scheduler{ return &Scheduler{
id: generateSchedulerID(), id: generateSchedulerID(),
state: base.NewServerState(), state: &serverState{value: srvStateNew},
logger: logger, logger: logger,
client: NewClient(r), client: NewClient(r),
rdb: rdb.NewRDB(c), rdb: rdb.NewRDB(c),
@@ -193,23 +195,43 @@ func (s *Scheduler) Run() error {
// Start starts the scheduler. // Start starts the scheduler.
// It returns an error if the scheduler is already running or has been shutdown. // It returns an error if the scheduler is already running or has been shutdown.
func (s *Scheduler) Start() error { func (s *Scheduler) Start() error {
switch s.state.Get() { if err := s.start(); err != nil {
case base.StateActive: return err
return fmt.Errorf("asynq: the scheduler is already running")
case base.StateClosed:
return fmt.Errorf("asynq: the scheduler has already been stopped")
} }
s.logger.Info("Scheduler starting") s.logger.Info("Scheduler starting")
s.logger.Infof("Scheduler timezone is set to %v", s.location) s.logger.Infof("Scheduler timezone is set to %v", s.location)
s.cron.Start() s.cron.Start()
s.wg.Add(1) s.wg.Add(1)
go s.runHeartbeater() go s.runHeartbeater()
s.state.Set(base.StateActive) return nil
}
// Checks server state and returns an error if pre-condition is not met.
// Otherwise it sets the server state to active.
func (s *Scheduler) start() error {
s.state.mu.Lock()
defer s.state.mu.Unlock()
switch s.state.value {
case srvStateActive:
return fmt.Errorf("asynq: the scheduler is already running")
case srvStateClosed:
return fmt.Errorf("asynq: the scheduler has already been stopped")
}
s.state.value = srvStateActive
return nil return nil
} }
// Shutdown stops and shuts down the scheduler. // Shutdown stops and shuts down the scheduler.
func (s *Scheduler) Shutdown() { func (s *Scheduler) Shutdown() {
s.state.mu.Lock()
if s.state.value == srvStateNew || s.state.value == srvStateClosed {
// scheduler is not running, do nothing and return.
s.state.mu.Unlock()
return
}
s.state.value = srvStateClosed
s.state.mu.Unlock()
s.logger.Info("Scheduler shutting down") s.logger.Info("Scheduler shutting down")
close(s.done) // signal heartbeater to stop close(s.done) // signal heartbeater to stop
ctx := s.cron.Stop() ctx := s.cron.Stop()
@@ -219,7 +241,6 @@ func (s *Scheduler) Shutdown() {
s.clearHistory() s.clearHistory()
s.client.Close() s.client.Close()
s.rdb.Close() s.rdb.Close()
s.state.Set(base.StateClosed)
s.logger.Info("Scheduler stopped") s.logger.Info("Scheduler stopped")
} }

server.go

@@ -38,7 +38,7 @@ type Server struct {
broker base.Broker broker base.Broker
state *base.ServerState state *serverState
// wait group to wait for all goroutines to finish. // wait group to wait for all goroutines to finish.
wg sync.WaitGroup wg sync.WaitGroup
@@ -52,14 +52,57 @@ type Server struct {
janitor *janitor janitor *janitor
} }
type serverState struct {
mu sync.Mutex
value serverStateValue
}
type serverStateValue int
const (
// StateNew represents a new server. Server begins in
// this state and then transitions to StateActive when
// Start or Run is called.
srvStateNew serverStateValue = iota
// StateActive indicates the server is up and active.
srvStateActive
// StateStopped indicates the server is up but no longer processing new tasks.
srvStateStopped
// StateClosed indicates the server has been shutdown.
srvStateClosed
)
var serverStates = []string{
"new",
"active",
"stopped",
"closed",
}
func (s serverStateValue) String() string {
if srvStateNew <= s && s <= srvStateClosed {
return serverStates[s]
}
return "unknown status"
}
// Config specifies the server's background-task processing behavior. // Config specifies the server's background-task processing behavior.
type Config struct { type Config struct {
// Maximum number of concurrent processing of tasks. // Maximum number of concurrent processing of tasks.
// //
// If set to a zero or negative value, NewServer will overwrite the value // If set to a zero or negative value, NewServer will overwrite the value
// to the number of CPUs usable by the currennt process. // to the number of CPUs usable by the current process.
Concurrency int Concurrency int
// BaseContext optionally specifies a function that returns the base context for Handler invocations on this server.
//
// If BaseContext is nil, the default is context.Background().
// If this is defined, then it MUST return a non-nil context.
BaseContext func() context.Context
// Function to calculate retry delay for a failed task. // Function to calculate retry delay for a failed task.
// //
// By default, it uses exponential backoff algorithm to calculate the delay. // By default, it uses exponential backoff algorithm to calculate the delay.
@@ -144,6 +187,12 @@ type Config struct {
// //
// If unset or zero, the interval is set to 15 seconds. // If unset or zero, the interval is set to 15 seconds.
HealthCheckInterval time.Duration HealthCheckInterval time.Duration
// DelayedTaskCheckInterval specifies the interval between checks on 'scheduled' and 'retry'
// tasks; tasks that are ready to be processed are forwarded to the 'pending' state.
//
// If unset or zero, the interval is set to 5 seconds.
DelayedTaskCheckInterval time.Duration
}
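A sketch of a Config wiring up both new options; the address, values, and context key are illustrative:

package main

import (
	"context"
	"time"

	"github.com/hibiken/asynq"
)

type ctxKey string

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		asynq.Config{
			Concurrency: 10,
			// Every handler invocation derives its context from this one.
			BaseContext: func() context.Context {
				return context.WithValue(context.Background(), ctxKey("release"), "v0.22.0")
			},
			// Check scheduled/retry tasks every 10s instead of the 5s default.
			DelayedTaskCheckInterval: 10 * time.Second,
		},
	)
	_ = srv // start with srv.Run(handler) as usual
}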
// An ErrorHandler handles an error that occurred during task processing.
@@ -287,6 +336,8 @@ const (
defaultShutdownTimeout = 8 * time.Second
defaultHealthCheckInterval = 15 * time.Second
defaultDelayedTaskCheckInterval = 5 * time.Second
)
// NewServer returns a new Server given a redis connection option
@@ -296,6 +347,10 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
if !ok {
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
}
baseCtxFn := cfg.BaseContext
if baseCtxFn == nil {
baseCtxFn = context.Background
}
n := cfg.Concurrency
if n < 1 {
n = runtime.NumCPU()
@@ -343,7 +398,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
state := base.NewServerState()
srvState := &serverState{value: srvStateNew}
cancels := base.NewCancelations()
syncer := newSyncer(syncerParams{
@@ -358,15 +413,19 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
concurrency: n,
queues: queues,
strictPriority: cfg.StrictPriority,
state: state,
state: srvState,
starting: starting,
finished: finished,
})
delayedTaskCheckInterval := cfg.DelayedTaskCheckInterval
if delayedTaskCheckInterval == 0 {
delayedTaskCheckInterval = defaultDelayedTaskCheckInterval
}
forwarder := newForwarder(forwarderParams{
logger: logger,
broker: rdb,
queues: qnames,
interval: 5 * time.Second,
interval: delayedTaskCheckInterval,
})
subscriber := newSubscriber(subscriberParams{
logger: logger,
@@ -377,6 +436,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
logger: logger,
broker: rdb,
retryDelayFunc: delayFunc,
baseCtxFn: baseCtxFn,
isFailureFunc: isFailureFunc,
syncCh: syncCh,
cancelations: cancels,
@@ -411,7 +471,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
return &Server{
logger: logger,
broker: rdb,
state: state,
state: srvState,
forwarder: forwarder,
processor: processor,
syncer: syncer,
@@ -481,17 +541,11 @@ func (srv *Server) Start(handler Handler) error {
if handler == nil {
return fmt.Errorf("asynq: server cannot run with nil handler")
}
switch srv.state.Get() {
case base.StateActive:
return fmt.Errorf("asynq: the server is already running")
case base.StateStopped:
return fmt.Errorf("asynq: the server is in the stopped state. Waiting for shutdown.")
case base.StateClosed:
return ErrServerClosed
}
srv.state.Set(base.StateActive)
srv.processor.handler = handler
if err := srv.start(); err != nil {
return err
}
srv.logger.Info("Starting processing")
srv.heartbeater.start(&srv.wg)
@@ -505,16 +559,36 @@ func (srv *Server) Start(handler Handler) error {
return nil
}
// Checks server state and returns an error if pre-condition is not met.
// Otherwise it sets the server state to active.
func (srv *Server) start() error {
srv.state.mu.Lock()
defer srv.state.mu.Unlock()
switch srv.state.value {
case srvStateActive:
return fmt.Errorf("asynq: the server is already running")
case srvStateStopped:
return fmt.Errorf("asynq: the server is in the stopped state. Waiting for shutdown.")
case srvStateClosed:
return ErrServerClosed
}
srv.state.value = srvStateActive
return nil
}
// Shutdown gracefully shuts down the server.
// It gracefully closes all active workers. The server will wait for
// active workers to finish processing tasks for duration specified in Config.ShutdownTimeout.
// If worker didn't finish processing a task during the timeout, the task will be pushed back to Redis.
func (srv *Server) Shutdown() {
switch srv.state.Get() {
case base.StateNew, base.StateClosed:
srv.state.mu.Lock()
if srv.state.value == srvStateNew || srv.state.value == srvStateClosed {
srv.state.mu.Unlock()
// server is not running, do nothing and return.
return
}
srv.state.value = srvStateClosed
srv.state.mu.Unlock()
srv.logger.Info("Starting graceful shutdown")
// Note: The order of shutdown is important.
@@ -529,12 +603,9 @@ func (srv *Server) Shutdown() {
srv.janitor.shutdown()
srv.healthchecker.shutdown()
srv.heartbeater.shutdown()
srv.wg.Wait()
srv.broker.Close()
srv.state.Set(base.StateClosed)
srv.logger.Info("Exiting")
}
@@ -544,8 +615,16 @@ func (srv *Server) Shutdown() {
//
// Stop does not shutdown the server, make sure to call Shutdown before exit.
func (srv *Server) Stop() {
srv.state.mu.Lock()
if srv.state.value != srvStateActive {
// Invalid call to Stop, server can only go from Active state to Stopped state.
srv.state.mu.Unlock()
return
}
srv.state.value = srvStateStopped
srv.state.mu.Unlock()
srv.logger.Info("Stopping processor")
srv.processor.stop()
srv.state.Set(base.StateStopped)
srv.logger.Info("Processor stopped")
}
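Stop and Shutdown now compose through the same state value: Stop is only valid from the active state (pausing task processing), while Shutdown is the terminal transition from any running state. A sketch of the intended call order during graceful termination; the signal choices mirror common practice and are illustrative:

package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "localhost:6379"}, asynq.Config{})
	h := asynq.HandlerFunc(func(ctx context.Context, t *asynq.Task) error { return nil })

	if err := srv.Start(h); err != nil {
		log.Fatal(err)
	}

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM, syscall.SIGTSTP)
	for sig := range sigs {
		if sig == syscall.SIGTSTP {
			srv.Stop() // pause task processing; Active -> Stopped
			continue
		}
		break
	}
	srv.Shutdown() // terminal: waits for in-flight tasks, then transitions to Closed
}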


@@ -5,6 +5,7 @@
package asynq
import (
"context"
"fmt" "fmt"
"sync" "sync"
"testing" "testing"
@@ -41,7 +42,7 @@ func TestSyncer(t *testing.T) {
m := msg
syncRequestCh <- &syncRequest{
fn: func() error {
return rdbClient.Done(m)
return rdbClient.Done(context.Background(), m)
},
deadline: time.Now().Add(5 * time.Minute),
}


@@ -5,6 +5,7 @@
package cmd
import (
"encoding/json"
"fmt" "fmt"
"io" "io"
"math" "math"
@@ -40,8 +41,11 @@ Example: watch -n 3 asynq stats -> Shows current state of tasks every three seconds
Run: stats,
}
var jsonFlag bool
func init() {
rootCmd.AddCommand(statsCmd)
statsCmd.Flags().BoolVar(&jsonFlag, "json", false, "Output stats in JSON format.")
// Here you will define your flags and configuration settings.
@@ -55,15 +59,21 @@ func init() {
}
type AggregateStats struct {
Active int `json:"active"`
Pending int `json:"pending"`
Scheduled int `json:"scheduled"`
Retry int `json:"retry"`
Archived int `json:"archived"`
Completed int `json:"completed"`
Processed int `json:"processed"`
Failed int `json:"failed"`
Timestamp time.Time `json:"timestamp"`
}
type FullStats struct {
Aggregate AggregateStats `json:"aggregate"`
QueueStats []*rdb.Stats `json:"queues"`
RedisInfo map[string]string `json:"redis"`
}
func stats(cmd *cobra.Command, args []string) {
@@ -104,6 +114,23 @@ func stats(cmd *cobra.Command, args []string) {
fmt.Println(err)
os.Exit(1)
}
if jsonFlag {
statsJSON, err := json.Marshal(FullStats{
Aggregate: aggStats,
QueueStats: stats,
RedisInfo: info,
})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(string(statsJSON))
return
}
bold := color.New(color.Bold)
bold.Println("Task Count by State")
printStatsByState(&aggStats)
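With the new flag, `asynq stats --json` prints the FullStats document to stdout, which makes the CLI scriptable. A sketch of consuming it from Go, assuming the asynq CLI is on PATH; the struct below mirrors only the aggregate fields defined above:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

// Partial mirror of the CLI's FullStats output.
type fullStats struct {
	Aggregate struct {
		Active    int `json:"active"`
		Pending   int `json:"pending"`
		Processed int `json:"processed"`
		Failed    int `json:"failed"`
	} `json:"aggregate"`
}

func main() {
	out, err := exec.Command("asynq", "stats", "--json").Output()
	if err != nil {
		log.Fatal(err)
	}
	var s fullStats
	if err := json.Unmarshal(out, &s); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("active=%d pending=%d processed=%d failed=%d\n",
		s.Aggregate.Active, s.Aggregate.Pending, s.Aggregate.Processed, s.Aggregate.Failed)
}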


@@ -4,12 +4,12 @@ go 1.13
require (
github.com/fatih/color v1.9.0
github.com/go-redis/redis/v8 v8.11.2
github.com/google/uuid v1.2.0
github.com/hibiken/asynq v0.17.1
github.com/go-redis/redis/v8 v8.11.4
github.com/hibiken/asynq v0.21.0
github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d
github.com/mitchellh/go-homedir v1.1.0
github.com/prometheus/client_golang v1.11.0
github.com/spf13/afero v1.1.2 // indirect
github.com/spf13/cobra v1.1.1
github.com/spf13/viper v1.7.0
)
replace github.com/hibiken/asynq => ./..


@@ -16,19 +16,25 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -54,11 +60,16 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis/v8 v8.11.2 h1:WqlSpAwz8mxDSMCvbyz1Mkiqe0LE5OY4j3lgkvu1Ts0=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -75,8 +86,11 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -84,14 +98,18 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
@@ -121,18 +139,28 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hibiken/asynq v0.19.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
github.com/hibiken/asynq v0.21.0 h1:uH9XogJhjq/S39E0/DEPWLZQ6hHJ73UiblZTe4RzHwA=
github.com/hibiken/asynq v0.21.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d h1:Er+U+9PmnyRHRDQjSjRQ24HoWvOY7w9Pk7bUPYM3Ags=
github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d/go.mod h1:VmxwMfMKyb6gyv8xG0oOBMXIhquWKPx+zPtbVBd2Q1s=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -148,6 +176,7 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -160,37 +189,58 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@@ -201,6 +251,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
@@ -225,6 +277,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
@@ -280,11 +334,14 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -293,7 +350,9 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -305,6 +364,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -313,15 +373,24 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -382,8 +451,10 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -398,10 +469,13 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -0,0 +1,56 @@
package main
import (
"flag"
"fmt"
"log"
"net/http"
"github.com/hibiken/asynq"
"github.com/hibiken/asynq/x/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// Declare command-line flags.
// These variables are bound to flags in init().
var (
flagRedisAddr string
flagRedisDB int
flagRedisPassword string
flagRedisUsername string
flagPort int
)
func init() {
flag.StringVar(&flagRedisAddr, "redis-addr", "127.0.0.1:6379", "host:port of redis server to connect to")
flag.IntVar(&flagRedisDB, "redis-db", 0, "redis DB number to use")
flag.StringVar(&flagRedisPassword, "redis-password", "", "password used to connect to redis server")
flag.StringVar(&flagRedisUsername, "redis-username", "", "username used to connect to redis server")
flag.IntVar(&flagPort, "port", 9876, "port to use for the HTTP server")
}
func main() {
flag.Parse()
// Using NewPedanticRegistry here to test the implementation of Collectors and Metrics.
reg := prometheus.NewPedanticRegistry()
inspector := asynq.NewInspector(asynq.RedisClientOpt{
Addr: flagRedisAddr,
DB: flagRedisDB,
Password: flagRedisPassword,
Username: flagRedisUsername,
})
reg.MustRegister(
metrics.NewQueueMetricsCollector(inspector),
// Add the standard process and go metrics to the registry
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
collectors.NewGoCollector(),
)
http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
log.Printf("exporter server is listening on port: %d\n", flagPort)
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", flagPort), nil))
}
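Built as-is, the exporter serves the queue metrics at /metrics on the configured port (9876 by default), so a Prometheus scrape job pointed at host:9876 will pull them; since the collector gathers its data through the Inspector on demand, the scrape interval effectively bounds how often the exporter queries Redis.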

x/go.mod

@@ -0,0 +1,10 @@
module github.com/hibiken/asynq/x
go 1.16
require (
github.com/go-redis/redis/v8 v8.11.4
github.com/google/uuid v1.3.0
github.com/hibiken/asynq v0.21.0
github.com/prometheus/client_golang v1.11.0
)

x/go.sum

@@ -0,0 +1,258 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hibiken/asynq v0.21.0 h1:uH9XogJhjq/S39E0/DEPWLZQ6hHJ73UiblZTe4RzHwA=
github.com/hibiken/asynq v0.21.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

190
x/metrics/metrics.go Normal file

@@ -0,0 +1,190 @@
// Package metrics provides implementations of prometheus.Collector to collect Asynq queue metrics.
package metrics

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
	"github.com/prometheus/client_golang/prometheus"
)

// Namespace used in fully-qualified metrics names.
const namespace = "asynq"

// QueueMetricsCollector gathers queue metrics.
// It implements the prometheus.Collector interface.
//
// All metrics exported from this collector have the prefix "asynq".
type QueueMetricsCollector struct {
	inspector *asynq.Inspector
}

// collectQueueInfo gathers QueueInfo of all queues.
// Since this operation is expensive, it must be called once per collection.
func (qmc *QueueMetricsCollector) collectQueueInfo() ([]*asynq.QueueInfo, error) {
	qnames, err := qmc.inspector.Queues()
	if err != nil {
		return nil, fmt.Errorf("failed to get queue names: %v", err)
	}
	infos := make([]*asynq.QueueInfo, len(qnames))
	for i, qname := range qnames {
		qinfo, err := qmc.inspector.GetQueueInfo(qname)
		if err != nil {
			return nil, fmt.Errorf("failed to get queue info: %v", err)
		}
		infos[i] = qinfo
	}
	return infos, nil
}
// Descriptors used by QueueMetricsCollector
var (
	tasksQueuedDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "tasks_enqueued_total"),
		"Number of tasks enqueued; broken down by queue and state.",
		[]string{"queue", "state"}, nil,
	)

	queueSizeDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "queue_size"),
		"Number of tasks in a queue.",
		[]string{"queue"}, nil,
	)

	queueLatencyDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "queue_latency_seconds"),
		"Number of seconds the oldest pending task is waiting in pending state to be processed.",
		[]string{"queue"}, nil,
	)

	queueMemUsgDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "queue_memory_usage_approx_bytes"),
		"Amount of memory used by a given queue in bytes (approximate value obtained by sampling).",
		[]string{"queue"}, nil,
	)

	tasksProcessedTotalDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "tasks_processed_total"),
		"Number of tasks processed (both succeeded and failed); broken down by queue.",
		[]string{"queue"}, nil,
	)

	tasksFailedTotalDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "tasks_failed_total"),
		"Number of tasks failed; broken down by queue.",
		[]string{"queue"}, nil,
	)

	pausedQueues = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "queue_paused_total"),
		"1 if the queue is paused, 0 otherwise.",
		[]string{"queue"}, nil,
	)
)
func (qmc *QueueMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(qmc, ch)
}

func (qmc *QueueMetricsCollector) Collect(ch chan<- prometheus.Metric) {
	queueInfos, err := qmc.collectQueueInfo()
	if err != nil {
		// Log and fall through: queueInfos is nil on error, so the loop below emits nothing.
		log.Printf("Failed to collect metrics data: %v", err)
	}

	for _, info := range queueInfos {
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Active),
			info.Queue,
			"active",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Pending),
			info.Queue,
			"pending",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Scheduled),
			info.Queue,
			"scheduled",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Retry),
			info.Queue,
			"retry",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Archived),
			info.Queue,
			"archived",
		)
		ch <- prometheus.MustNewConstMetric(
			tasksQueuedDesc,
			prometheus.GaugeValue,
			float64(info.Completed),
			info.Queue,
			"completed",
		)
		ch <- prometheus.MustNewConstMetric(
			queueSizeDesc,
			prometheus.GaugeValue,
			float64(info.Size),
			info.Queue,
		)
		ch <- prometheus.MustNewConstMetric(
			queueLatencyDesc,
			prometheus.GaugeValue,
			info.Latency.Seconds(),
			info.Queue,
		)
		ch <- prometheus.MustNewConstMetric(
			queueMemUsgDesc,
			prometheus.GaugeValue,
			float64(info.MemoryUsage),
			info.Queue,
		)
		ch <- prometheus.MustNewConstMetric(
			tasksProcessedTotalDesc,
			prometheus.CounterValue,
			float64(info.ProcessedTotal),
			info.Queue,
		)
		ch <- prometheus.MustNewConstMetric(
			tasksFailedTotalDesc,
			prometheus.CounterValue,
			float64(info.FailedTotal),
			info.Queue,
		)

		pausedValue := 0 // zero to indicate "not paused"
		if info.Paused {
			pausedValue = 1
		}
		ch <- prometheus.MustNewConstMetric(
			pausedQueues,
			prometheus.GaugeValue,
			float64(pausedValue),
			info.Queue,
		)
	}
}
// NewQueueMetricsCollector returns a collector that exports metrics about Asynq queues.
func NewQueueMetricsCollector(inspector *asynq.Inspector) *QueueMetricsCollector {
	return &QueueMetricsCollector{inspector: inspector}
}
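To sanity-check the collector without running an HTTP server, register it and drive one collection cycle by hand. A minimal sketch, assuming a Redis instance at the default address:

package main

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynq/x/metrics"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})
	reg := prometheus.NewRegistry()
	reg.MustRegister(metrics.NewQueueMetricsCollector(inspector))

	// Gather runs one collection cycle, just as a scrape would.
	families, err := reg.Gather()
	if err != nil {
		log.Fatal(err)
	}
	for _, mf := range families {
		fmt.Println(mf.GetName()) // e.g. asynq_tasks_enqueued_total
	}
}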