Mirror of https://github.com/hibiken/asynq.git (synced 2025-10-20 09:16:12 +08:00)

Compare commits

12 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 476862dd7b | |
| | dcd873fa2a | |
| | 2604bb2192 | |
| | 942345ee80 | |
| | 1f059eeee1 | |
| | 4ae73abdaa | |
| | 96b2318300 | |
| | 8312515e64 | |
| | 50e7f38365 | |
| | fadcae76d6 | |
| | a2d4ead989 | |
| | 82b6828f43 | |
@@ -5,9 +5,9 @@ git:
go: [1.13.x, 1.14.x, 1.15.x]
script:
  - go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
  - go test -run=XXX -bench=. -loglevel=debug ./...
  - go test -run=^$ -bench=. -loglevel=debug ./...
services:
  - redis-server
after_success:
  - bash ./.travis/benchcmp.sh
  - travis_wait 60 bash ./.travis/benchstat.sh
  - bash <(curl -s https://codecov.io/bash)
@@ -2,17 +2,19 @@ if [ "${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" != "master" ]; then
  REMOTE_URL="$(git config --get remote.origin.url)";
  cd ${TRAVIS_BUILD_DIR}/.. && \
  git clone ${REMOTE_URL} "${TRAVIS_REPO_SLUG}-bench" && \
  # turn the detached message off
  git config --global advice.detachedHead false && \
  cd "${TRAVIS_REPO_SLUG}-bench" && \

  # Benchmark master
  git checkout master && \
  go test -run=XXX -bench=. ./... > master.txt && \
  go test -run=^$ -bench=. -count=5 -timeout=60m -benchmem ./... > master.txt && \

  # Benchmark feature branch
  git checkout ${TRAVIS_COMMIT} && \
  go test -run=XXX -bench=. ./... > feature.txt && \
  go test -run=^$ -bench=. -count=5 -timeout=60m -benchmem ./... > feature.txt && \

  # compare two benchmarks
  go get -u golang.org/x/tools/cmd/benchcmp && \
  benchcmp master.txt feature.txt;
  go get -u golang.org/x/perf/cmd/benchstat && \
  benchstat master.txt feature.txt;
fi
24  CHANGELOG.md

@@ -7,6 +7,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

## [0.13.1] - 2020-11-22

### Fixed

- Fixed processor to wait for the specified time duration before forcefully shutting down workers.

## [0.13.0] - 2020-10-13

### Added

- `Scheduler` type is added to enable periodic tasks. See the godoc for its APIs and [wiki](https://github.com/hibiken/asynq/wiki/Periodic-Tasks) for the getting-started guide.

### Changed

- interface `Option` has changed. See the godoc for the new interface.
  This change has no impact as long as you are using the exported functions (e.g. `MaxRetry`, `Queue`, etc.)
  to create `Option`s.

### Added

- `Payload.String() string` method is added
- `Payload.MarshalJSON() ([]byte, error)` method is added

## [0.12.0] - 2020-09-12

**IMPORTANT**: If you are upgrading from a previous version, please install the latest version of the CLI (`go get -u github.com/hibiken/asynq/tools/asynq`) and run the `asynq migrate` command. No process should be writing to Redis while you run the migration command.
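As a quick orientation for the `Scheduler` added in 0.13.0 (it mirrors the `ExampleScheduler` test included later in this change set), a minimal registration sketch might look like the following; the Redis address and cron spec are placeholders, not values taken from this diff.

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	// Assumes a Redis server reachable at localhost:6379.
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		&asynq.SchedulerOpts{Location: time.UTC},
	)

	// Enqueue a hypothetical "cleanup" task every night at 2am.
	entryID, err := scheduler.Register("0 2 * * *", asynq.NewTask("cleanup", nil))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("registered entry: %s", entryID)

	// Run blocks until an OS signal is received, then stops the scheduler.
	if err := scheduler.Run(); err != nil {
		log.Fatal(err)
	}
}
```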
@@ -42,6 +42,7 @@ A system can consist of multiple worker servers and brokers, giving way to high
- Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)
- [Flexible handler interface with support for middlewares](https://github.com/hibiken/asynq/wiki/Handler-Deep-Dive)
- [Ability to pause queue](/tools/asynq/README.md#pause) to stop processing tasks from the queue
- [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks)
- [Support Redis Cluster](https://github.com/hibiken/asynq/wiki/Redis-Cluster) for automatic sharding and high availability
- [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for high availability
- [CLI](#command-line-tool) to inspect and remote-control queues and tasks
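To illustrate the per-task timeout and multiple-queue features listed above, here is a hedged client-side sketch; the task type, payload keys, and Redis address are illustrative only.

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	t := asynq.NewTask("image:resize", map[string]interface{}{"src": "some/path.jpg"})

	// Enqueue into the "critical" queue, give the handler at most 30 seconds,
	// and retry up to 5 times on failure.
	res, err := client.Enqueue(t,
		asynq.Queue("critical"),
		asynq.Timeout(30*time.Second),
		asynq.MaxRetry(5),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("enqueued task id=%s queue=%s", res.ID, res.Queue)
}
```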
73  client.go

@@ -28,7 +28,7 @@ type Client struct {
    rdb *rdb.RDB
}

// NewClient and returns a new Client given a redis connection option.
// NewClient returns a new Client instance given a redis connection option.
func NewClient(r RedisConnOpt) *Client {
    rdb := rdb.NewRDB(createRedisClient(r))
    return &Client{
@@ -37,8 +37,29 @@ func NewClient(r RedisConnOpt) *Client {
    }
}

type OptionType int

const (
    MaxRetryOpt OptionType = iota
    QueueOpt
    TimeoutOpt
    DeadlineOpt
    UniqueOpt
    ProcessAtOpt
    ProcessInOpt
)

// Option specifies the task processing behavior.
type Option interface{}
type Option interface {
    // String returns a string representation of the option.
    String() string

    // Type describes the type of the option.
    Type() OptionType

    // Value returns a value used to create this option.
    Value() interface{}
}

// Internal option representations.
type (
@@ -62,13 +83,21 @@ func MaxRetry(n int) Option {
    return retryOption(n)
}

func (n retryOption) String() string     { return fmt.Sprintf("MaxRetry(%d)", int(n)) }
func (n retryOption) Type() OptionType   { return MaxRetryOpt }
func (n retryOption) Value() interface{} { return n }

// Queue returns an option to specify the queue to enqueue the task into.
//
// Queue name is case-insensitive and the lowercased version is used.
func Queue(name string) Option {
    return queueOption(strings.ToLower(name))
func Queue(qname string) Option {
    return queueOption(strings.ToLower(qname))
}

func (qname queueOption) String() string     { return fmt.Sprintf("Queue(%q)", string(qname)) }
func (qname queueOption) Type() OptionType   { return QueueOpt }
func (qname queueOption) Value() interface{} { return qname }

// Timeout returns an option to specify how long a task may run.
// If the timeout elapses before the Handler returns, then the task
// will be retried.
@@ -81,6 +110,10 @@ func Timeout(d time.Duration) Option {
    return timeoutOption(d)
}

func (d timeoutOption) String() string     { return fmt.Sprintf("Timeout(%v)", time.Duration(d)) }
func (d timeoutOption) Type() OptionType   { return TimeoutOpt }
func (d timeoutOption) Value() interface{} { return d }

// Deadline returns an option to specify the deadline for the given task.
// If it reaches the deadline before the Handler returns, then the task
// will be retried.
@@ -91,6 +124,10 @@ func Deadline(t time.Time) Option {
    return deadlineOption(t)
}

func (t deadlineOption) String() string     { return fmt.Sprintf("Deadline(%v)", time.Time(t)) }
func (t deadlineOption) Type() OptionType   { return DeadlineOpt }
func (t deadlineOption) Value() interface{} { return t }

// Unique returns an option to enqueue a task only if the given task is unique.
// Task enqueued with this option is guaranteed to be unique within the given ttl.
// Once the task gets processed successfully or once the TTL has expired, another task with the same uniqueness may be enqueued.
@@ -104,6 +141,10 @@ func Unique(ttl time.Duration) Option {
    return uniqueOption(ttl)
}

func (ttl uniqueOption) String() string     { return fmt.Sprintf("Unique(%v)", time.Duration(ttl)) }
func (ttl uniqueOption) Type() OptionType   { return UniqueOpt }
func (ttl uniqueOption) Value() interface{} { return ttl }

// ProcessAt returns an option to specify when to process the given task.
//
// If there's a conflicting ProcessIn option, the last option passed to Enqueue overrides the others.
@@ -111,6 +152,10 @@ func ProcessAt(t time.Time) Option {
    return processAtOption(t)
}

func (t processAtOption) String() string     { return fmt.Sprintf("ProcessAt(%v)", time.Time(t)) }
func (t processAtOption) Type() OptionType   { return ProcessAtOpt }
func (t processAtOption) Value() interface{} { return t }

// ProcessIn returns an option to specify when to process the given task relative to the current time.
//
// If there's a conflicting ProcessAt option, the last option passed to Enqueue overrides the others.
@@ -118,6 +163,10 @@ func ProcessIn(d time.Duration) Option {
    return processInOption(d)
}

func (d processInOption) String() string     { return fmt.Sprintf("ProcessIn(%v)", time.Duration(d)) }
func (d processInOption) Type() OptionType   { return ProcessInOpt }
func (d processInOption) Value() interface{} { return d }

// ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
//
// ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
@@ -208,6 +257,9 @@ type Result struct {
    // ID is a unique identifier for the task.
    ID string

    // EnqueuedAt is the time the task was enqueued in UTC.
    EnqueuedAt time.Time

    // ProcessAt indicates when the task should be processed.
    ProcessAt time.Time

@@ -298,12 +350,13 @@ func (c *Client) Enqueue(task *Task, opts ...Option) (*Result, error) {
        return nil, err
    }
    return &Result{
        ID:        msg.ID.String(),
        ProcessAt: opt.processAt,
        Queue:     msg.Queue,
        Retry:     msg.Retry,
        Timeout:   timeout,
        Deadline:  deadline,
        ID:         msg.ID.String(),
        EnqueuedAt: time.Now().UTC(),
        ProcessAt:  opt.processAt,
        Queue:      msg.Queue,
        Retry:      msg.Retry,
        Timeout:    timeout,
        Deadline:   deadline,
    }, nil
}
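Because options returned by the exported constructors now satisfy the three-method `Option` interface above, they can be inspected generically. A small sketch under that assumption; the printed strings follow the `fmt.Sprintf` formats shown in the diff.

```go
package main

import (
	"fmt"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	opts := []asynq.Option{
		asynq.MaxRetry(5),
		asynq.Queue("critical"),
		asynq.ProcessIn(10 * time.Minute),
	}
	// Each option describes itself, e.g. MaxRetry(5), Queue("critical"), ProcessIn(10m0s).
	for _, opt := range opts {
		fmt.Printf("type=%d value=%v  %s\n", opt.Type(), opt.Value(), opt.String())
	}
}
```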
@@ -42,11 +42,12 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
|
||||
processAt: now,
|
||||
opts: []Option{},
|
||||
wantRes: &Result{
|
||||
ProcessAt: now,
|
||||
Queue: "default",
|
||||
Retry: defaultMaxRetry,
|
||||
Timeout: defaultTimeout,
|
||||
Deadline: noDeadline,
|
||||
EnqueuedAt: now.UTC(),
|
||||
ProcessAt: now,
|
||||
Queue: "default",
|
||||
Retry: defaultMaxRetry,
|
||||
Timeout: defaultTimeout,
|
||||
Deadline: noDeadline,
|
||||
},
|
||||
wantPending: map[string][]*base.TaskMessage{
|
||||
"default": {
|
||||
@@ -70,11 +71,12 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
|
||||
processAt: oneHourLater,
|
||||
opts: []Option{},
|
||||
wantRes: &Result{
|
||||
ProcessAt: oneHourLater,
|
||||
Queue: "default",
|
||||
Retry: defaultMaxRetry,
|
||||
Timeout: defaultTimeout,
|
||||
Deadline: noDeadline,
|
||||
EnqueuedAt: now.UTC(),
|
||||
ProcessAt: oneHourLater,
|
||||
Queue: "default",
|
||||
Retry: defaultMaxRetry,
|
||||
Timeout: defaultTimeout,
|
||||
Deadline: noDeadline,
|
||||
},
|
||||
wantPending: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
@@ -111,8 +113,8 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
|
||||
cmpopts.EquateApproxTime(500 * time.Millisecond),
|
||||
}
|
||||
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
|
||||
t.Errorf("%s;\nEnqueueAt(processAt, task) returned %v, want %v; (-want,+got)\n%s",
|
||||
tc.desc, gotRes, tc.wantRes, diff)
|
||||
t.Errorf("%s;\nEnqueue(task, ProcessAt(%v)) returned %v, want %v; (-want,+got)\n%s",
|
||||
tc.desc, tc.processAt, gotRes, tc.wantRes, diff)
|
||||
}
|
||||
|
||||
for qname, want := range tc.wantPending {
|
||||
@@ -366,7 +368,7 @@ func TestClientEnqueue(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
cmpOptions := []cmp.Option{
|
||||
cmpopts.IgnoreFields(Result{}, "ID"),
|
||||
cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"),
|
||||
cmpopts.EquateApproxTime(500 * time.Millisecond),
|
||||
}
|
||||
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
|
||||
@@ -471,12 +473,12 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
cmpOptions := []cmp.Option{
|
||||
cmpopts.IgnoreFields(Result{}, "ID"),
|
||||
cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"),
|
||||
cmpopts.EquateApproxTime(500 * time.Millisecond),
|
||||
}
|
||||
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
|
||||
t.Errorf("%s;\nEnqueueIn(delay, task) returned %v, want %v; (-want,+got)\n%s",
|
||||
tc.desc, gotRes, tc.wantRes, diff)
|
||||
t.Errorf("%s;\nEnqueue(task, ProcessIn(%v)) returned %v, want %v; (-want,+got)\n%s",
|
||||
tc.desc, tc.delay, gotRes, tc.wantRes, diff)
|
||||
}
|
||||
|
||||
for qname, want := range tc.wantPending {
|
||||
@@ -617,7 +619,7 @@ func TestClientDefaultOptions(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cmpOptions := []cmp.Option{
|
||||
cmpopts.IgnoreFields(Result{}, "ID"),
|
||||
cmpopts.IgnoreFields(Result{}, "ID", "EnqueuedAt"),
|
||||
cmpopts.EquateApproxTime(500 * time.Millisecond),
|
||||
}
|
||||
if diff := cmp.Diff(tc.wantRes, gotRes, cmpOptions...); diff != "" {
|
||||
|
@@ -9,6 +9,7 @@ import (
    "log"
    "os"
    "os/signal"
    "time"

    "github.com/hibiken/asynq"
    "golang.org/x/sys/unix"
@@ -78,6 +79,25 @@ func ExampleServer_Quiet() {
    srv.Stop()
}

func ExampleScheduler() {
    scheduler := asynq.NewScheduler(
        asynq.RedisClientOpt{Addr: ":6379"},
        &asynq.SchedulerOpts{Location: time.Local},
    )

    if _, err := scheduler.Register("* * * * *", asynq.NewTask("task1", nil)); err != nil {
        log.Fatal(err)
    }
    if _, err := scheduler.Register("@every 30s", asynq.NewTask("task2", nil)); err != nil {
        log.Fatal(err)
    }

    // Run blocks and waits for os signal to terminate the program.
    if err := scheduler.Run(); err != nil {
        log.Fatal(err)
    }
}

func ExampleParseRedisURI() {
    rconn, err := asynq.ParseRedisURI("redis://localhost:6379/10")
    if err != nil {
75  forwarder.go  (new file)

@@ -0,0 +1,75 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
    "sync"
    "time"

    "github.com/hibiken/asynq/internal/base"
    "github.com/hibiken/asynq/internal/log"
)

// A forwarder is responsible for moving scheduled and retry tasks to pending state
// so that the tasks get processed by the workers.
type forwarder struct {
    logger *log.Logger
    broker base.Broker

    // channel to communicate back to the long running "forwarder" goroutine.
    done chan struct{}

    // list of queue names to check and enqueue.
    queues []string

    // poll interval on average
    avgInterval time.Duration
}

type forwarderParams struct {
    logger   *log.Logger
    broker   base.Broker
    queues   []string
    interval time.Duration
}

func newForwarder(params forwarderParams) *forwarder {
    return &forwarder{
        logger:      params.logger,
        broker:      params.broker,
        done:        make(chan struct{}),
        queues:      params.queues,
        avgInterval: params.interval,
    }
}

func (f *forwarder) terminate() {
    f.logger.Debug("Forwarder shutting down...")
    // Signal the forwarder goroutine to stop polling.
    f.done <- struct{}{}
}

// start starts the "forwarder" goroutine.
func (f *forwarder) start(wg *sync.WaitGroup) {
    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            select {
            case <-f.done:
                f.logger.Debug("Forwarder done")
                return
            case <-time.After(f.avgInterval):
                f.exec()
            }
        }
    }()
}

func (f *forwarder) exec() {
    if err := f.broker.CheckAndEnqueue(f.queues...); err != nil {
        f.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
    }
}
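The forwarder's start/terminate shape above (a `done` channel, a `time.After` poll, and a caller-owned `sync.WaitGroup`) is a common Go idiom. A self-contained toy version, independent of asynq's internal `base.Broker` and logger types:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type poller struct {
	done     chan struct{}
	interval time.Duration
}

func (p *poller) start(wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-p.done:
				fmt.Println("poller done")
				return
			case <-time.After(p.interval):
				fmt.Println("tick: a real forwarder would call broker.CheckAndEnqueue here")
			}
		}
	}()
}

func (p *poller) terminate() { p.done <- struct{}{} }

func main() {
	p := &poller{done: make(chan struct{}), interval: 200 * time.Millisecond}
	var wg sync.WaitGroup
	p.start(&wg)
	time.Sleep(time.Second) // let it tick a few times
	p.terminate()
	wg.Wait()
}
```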
137
forwarder_test.go
Normal file
137
forwarder_test.go
Normal file
@@ -0,0 +1,137 @@
|
||||
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
package asynq
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
h "github.com/hibiken/asynq/internal/asynqtest"
|
||||
"github.com/hibiken/asynq/internal/base"
|
||||
"github.com/hibiken/asynq/internal/rdb"
|
||||
)
|
||||
|
||||
func TestForwarder(t *testing.T) {
|
||||
r := setup(t)
|
||||
defer r.Close()
|
||||
rdbClient := rdb.NewRDB(r)
|
||||
const pollInterval = time.Second
|
||||
s := newForwarder(forwarderParams{
|
||||
logger: testLogger,
|
||||
broker: rdbClient,
|
||||
queues: []string{"default", "critical"},
|
||||
interval: pollInterval,
|
||||
})
|
||||
t1 := h.NewTaskMessageWithQueue("gen_thumbnail", nil, "default")
|
||||
t2 := h.NewTaskMessageWithQueue("send_email", nil, "critical")
|
||||
t3 := h.NewTaskMessageWithQueue("reindex", nil, "default")
|
||||
t4 := h.NewTaskMessageWithQueue("sync", nil, "critical")
|
||||
now := time.Now()
|
||||
|
||||
tests := []struct {
|
||||
initScheduled map[string][]base.Z // scheduled queue initial state
|
||||
initRetry map[string][]base.Z // retry queue initial state
|
||||
initPending map[string][]*base.TaskMessage // default queue initial state
|
||||
wait time.Duration // wait duration before checking for final state
|
||||
wantScheduled map[string][]*base.TaskMessage // schedule queue final state
|
||||
wantRetry map[string][]*base.TaskMessage // retry queue final state
|
||||
wantPending map[string][]*base.TaskMessage // default queue final state
|
||||
}{
|
||||
{
|
||||
initScheduled: map[string][]base.Z{
|
||||
"default": {{Message: t1, Score: now.Add(time.Hour).Unix()}},
|
||||
"critical": {{Message: t2, Score: now.Add(-2 * time.Second).Unix()}},
|
||||
},
|
||||
initRetry: map[string][]base.Z{
|
||||
"default": {{Message: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()}},
|
||||
"critical": {},
|
||||
},
|
||||
initPending: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {t4},
|
||||
},
|
||||
wait: pollInterval * 2,
|
||||
wantScheduled: map[string][]*base.TaskMessage{
|
||||
"default": {t1},
|
||||
"critical": {},
|
||||
},
|
||||
wantRetry: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {},
|
||||
},
|
||||
wantPending: map[string][]*base.TaskMessage{
|
||||
"default": {t3},
|
||||
"critical": {t2, t4},
|
||||
},
|
||||
},
|
||||
{
|
||||
initScheduled: map[string][]base.Z{
|
||||
"default": {
|
||||
{Message: t1, Score: now.Unix()},
|
||||
{Message: t3, Score: now.Add(-500 * time.Millisecond).Unix()},
|
||||
},
|
||||
"critical": {
|
||||
{Message: t2, Score: now.Add(-2 * time.Second).Unix()},
|
||||
},
|
||||
},
|
||||
initRetry: map[string][]base.Z{
|
||||
"default": {},
|
||||
"critical": {},
|
||||
},
|
||||
initPending: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {t4},
|
||||
},
|
||||
wait: pollInterval * 2,
|
||||
wantScheduled: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {},
|
||||
},
|
||||
wantRetry: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {},
|
||||
},
|
||||
wantPending: map[string][]*base.TaskMessage{
|
||||
"default": {t1, t3},
|
||||
"critical": {t2, t4},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r) // clean up db before each test case.
|
||||
h.SeedAllScheduledQueues(t, r, tc.initScheduled) // initialize scheduled queue
|
||||
h.SeedAllRetryQueues(t, r, tc.initRetry) // initialize retry queue
|
||||
h.SeedAllPendingQueues(t, r, tc.initPending) // initialize default queue
|
||||
|
||||
var wg sync.WaitGroup
|
||||
s.start(&wg)
|
||||
time.Sleep(tc.wait)
|
||||
s.terminate()
|
||||
|
||||
for qname, want := range tc.wantScheduled {
|
||||
gotScheduled := h.GetScheduledMessages(t, r, qname)
|
||||
if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
|
||||
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.ScheduledKey(qname), diff)
|
||||
}
|
||||
}
|
||||
|
||||
for qname, want := range tc.wantRetry {
|
||||
gotRetry := h.GetRetryMessages(t, r, qname)
|
||||
if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" {
|
||||
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.RetryKey(qname), diff)
|
||||
}
|
||||
}
|
||||
|
||||
for qname, want := range tc.wantPending {
|
||||
gotPending := h.GetPendingMessages(t, r, qname)
|
||||
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||
t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.QueueKey(qname), diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
1  go.mod

@@ -6,6 +6,7 @@ require (
    github.com/go-redis/redis/v7 v7.4.0
    github.com/google/go-cmp v0.4.0
    github.com/google/uuid v1.1.1
    github.com/robfig/cron/v3 v3.0.1
    github.com/spf13/cast v1.3.1
    go.uber.org/goleak v0.10.0
    golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e
2  go.sum

@@ -27,6 +27,8 @@ github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
@@ -65,6 +65,24 @@ var SortWorkerInfoOpt = cmp.Transformer("SortWorkerInfo", func(in []*base.Worker
    return out
})

// SortSchedulerEntryOpt is a cmp.Option to sort base.SchedulerEntry for comparing slice of entries.
var SortSchedulerEntryOpt = cmp.Transformer("SortSchedulerEntry", func(in []*base.SchedulerEntry) []*base.SchedulerEntry {
    out := append([]*base.SchedulerEntry(nil), in...) // Copy input to avoid mutating it
    sort.Slice(out, func(i, j int) bool {
        return out[i].Spec < out[j].Spec
    })
    return out
})

// SortSchedulerEnqueueEventOpt is a cmp.Option to sort base.SchedulerEnqueueEvent for comparing slice of events.
var SortSchedulerEnqueueEventOpt = cmp.Transformer("SortSchedulerEnqueueEvent", func(in []*base.SchedulerEnqueueEvent) []*base.SchedulerEnqueueEvent {
    out := append([]*base.SchedulerEnqueueEvent(nil), in...)
    sort.Slice(out, func(i, j int) bool {
        return out[i].EnqueuedAt.Unix() < out[j].EnqueuedAt.Unix()
    })
    return out
})

// SortStringSliceOpt is a cmp.Option to sort string slice.
var SortStringSliceOpt = cmp.Transformer("SortStringSlice", func(in []string) []string {
    out := append([]string(nil), in...)
@@ -19,7 +19,7 @@ import (
)

// Version of asynq library and CLI.
const Version = "0.12.0"
const Version = "0.13.0"

// DefaultQueueName is the queue name used if none is specified by the user.
const DefaultQueueName = "default"
@@ -29,10 +29,11 @@ var DefaultQueue = QueueKey(DefaultQueueName)

// Global Redis keys.
const (
    AllServers    = "asynq:servers" // ZSET
    AllWorkers    = "asynq:workers" // ZSET
    AllQueues     = "asynq:queues"  // SET
    CancelChannel = "asynq:cancel"  // PubSub channel
    AllServers    = "asynq:servers"    // ZSET
    AllWorkers    = "asynq:workers"    // ZSET
    AllSchedulers = "asynq:schedulers" // ZSET
    AllQueues     = "asynq:queues"     // SET
    CancelChannel = "asynq:cancel"     // PubSub channel
)

// QueueKey returns a redis key for the given queue name.
@@ -81,13 +82,23 @@ func FailedKey(qname string, t time.Time) string {
}

// ServerInfoKey returns a redis key for process info.
func ServerInfoKey(hostname string, pid int, sid string) string {
    return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, sid)
func ServerInfoKey(hostname string, pid int, serverID string) string {
    return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, serverID)
}

// WorkersKey returns a redis key for the workers given hostname, pid, and server ID.
func WorkersKey(hostname string, pid int, sid string) string {
    return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, sid)
func WorkersKey(hostname string, pid int, serverID string) string {
    return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, serverID)
}

// SchedulerEntriesKey returns a redis key for the scheduler entries given scheduler ID.
func SchedulerEntriesKey(schedulerID string) string {
    return fmt.Sprintf("asynq:schedulers:{%s}", schedulerID)
}

// SchedulerHistoryKey returns a redis key for the scheduler's history for the given entry.
func SchedulerHistoryKey(entryID string) string {
    return fmt.Sprintf("asynq:scheduler_history:%s", entryID)
}

// UniqueKey returns a redis key with the given type, payload, and queue name.
@@ -208,10 +219,10 @@ const (
    // StatusIdle indicates the server is in idle state.
    StatusIdle ServerStatusValue = iota

    // StatusRunning indicates the servier is up and processing tasks.
    // StatusRunning indicates the server is up and active.
    StatusRunning

    // StatusQuiet indicates the server is up but not processing new tasks.
    // StatusQuiet indicates the server is up but not active.
    StatusQuiet

    // StatusStopped indicates the server has been stopped.
@@ -273,6 +284,40 @@ type WorkerInfo struct {
    Started time.Time
}

// SchedulerEntry holds information about a periodic task registered with a scheduler.
type SchedulerEntry struct {
    // Identifier of this entry.
    ID string

    // Spec describes the schedule of this entry.
    Spec string

    // Type is the task type of the periodic task.
    Type string

    // Payload is the payload of the periodic task.
    Payload map[string]interface{}

    // Opts is the options for the periodic task.
    Opts []string

    // Next shows the next time the task will be enqueued.
    Next time.Time

    // Prev shows the last time the task was enqueued.
    // Zero time if task was never enqueued.
    Prev time.Time
}

// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
type SchedulerEnqueueEvent struct {
    // ID of the task that was enqueued.
    TaskID string

    // Time the task was enqueued.
    EnqueuedAt time.Time
}

// Cancelations is a collection that holds cancel functions for all active tasks.
//
// Cancelations are safe for concurrent use by multiple goroutines.
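`SchedulerEntry` above is what the scheduler heartbeat serializes into Redis (see `WriteSchedulerEntries` later in this diff, which JSON-marshals each entry). A sketch of what one marshaled entry could look like; it uses a local mirror of the struct because `internal/base` cannot be imported from outside the module, and the field values are made up.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// entry mirrors base.SchedulerEntry for illustration only; the real type lives
// in the internal/base package.
type entry struct {
	ID      string
	Spec    string
	Type    string
	Payload map[string]interface{}
	Opts    []string
	Next    time.Time
	Prev    time.Time
}

func main() {
	e := entry{
		ID:   "hypothetical-entry-id",
		Spec: "@every 30s",
		Type: "send_email",
		Opts: []string{`Queue("default")`, "MaxRetry(25)"},
		Next: time.Now().Add(30 * time.Second).UTC(),
	}
	b, err := json.Marshal(e)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // roughly the JSON blob pushed by WriteSchedulerEntries
}
```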
@@ -212,6 +212,41 @@ func TestWorkersKey(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSchedulerEntriesKey(t *testing.T) {
|
||||
tests := []struct {
|
||||
schedulerID string
|
||||
want string
|
||||
}{
|
||||
{"localhost:9876:scheduler123", "asynq:schedulers:{localhost:9876:scheduler123}"},
|
||||
{"127.0.0.1:1234:scheduler987", "asynq:schedulers:{127.0.0.1:1234:scheduler987}"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
got := SchedulerEntriesKey(tc.schedulerID)
|
||||
if got != tc.want {
|
||||
t.Errorf("SchedulerEntriesKey(%q) = %q, want %q", tc.schedulerID, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSchedulerHistoryKey(t *testing.T) {
|
||||
tests := []struct {
|
||||
entryID string
|
||||
want string
|
||||
}{
|
||||
{"entry876", "asynq:scheduler_history:entry876"},
|
||||
{"entry345", "asynq:scheduler_history:entry345"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
got := SchedulerHistoryKey(tc.entryID)
|
||||
if got != tc.want {
|
||||
t.Errorf("SchedulerHistoryKey(%q) = %q, want %q",
|
||||
tc.entryID, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUniqueKey(t *testing.T) {
|
||||
tests := []struct {
|
||||
desc string
|
||||
|
@@ -758,7 +758,7 @@ return keys`)
|
||||
|
||||
// ListServers returns the list of server info.
|
||||
func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
|
||||
now := time.Now().UTC()
|
||||
now := time.Now()
|
||||
res, err := listServerKeysCmd.Run(r.client, []string{base.AllServers}, now.Unix()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -791,7 +791,7 @@ return keys`)
|
||||
|
||||
// ListWorkers returns the list of worker stats.
|
||||
func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
||||
now := time.Now().UTC()
|
||||
now := time.Now()
|
||||
res, err := listWorkerKeysCmd.Run(r.client, []string{base.AllWorkers}, now.Unix()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -818,6 +818,63 @@ func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
||||
return workers, nil
|
||||
}
|
||||
|
||||
// Note: Script also removes stale keys.
|
||||
var listSchedulerKeysCmd = redis.NewScript(`
|
||||
local now = tonumber(ARGV[1])
|
||||
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
|
||||
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
|
||||
return keys`)
|
||||
|
||||
// ListSchedulerEntries returns the list of scheduler entries.
|
||||
func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
|
||||
now := time.Now()
|
||||
res, err := listSchedulerKeysCmd.Run(r.client, []string{base.AllSchedulers}, now.Unix()).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keys, err := cast.ToStringSliceE(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var entries []*base.SchedulerEntry
|
||||
for _, key := range keys {
|
||||
data, err := r.client.LRange(key, 0, -1).Result()
|
||||
if err != nil {
|
||||
continue // skip bad data
|
||||
}
|
||||
for _, s := range data {
|
||||
var e base.SchedulerEntry
|
||||
if err := json.Unmarshal([]byte(s), &e); err != nil {
|
||||
continue // skip bad data
|
||||
}
|
||||
entries = append(entries, &e)
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// ListSchedulerEnqueueEvents returns the list of scheduler enqueue events.
|
||||
func (r *RDB) ListSchedulerEnqueueEvents(entryID string) ([]*base.SchedulerEnqueueEvent, error) {
|
||||
key := base.SchedulerHistoryKey(entryID)
|
||||
zs, err := r.client.ZRangeWithScores(key, 0, -1).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var events []*base.SchedulerEnqueueEvent
|
||||
for _, z := range zs {
|
||||
data, err := cast.ToStringE(z.Member)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var e base.SchedulerEnqueueEvent
|
||||
if err := json.Unmarshal([]byte(data), &e); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
events = append(events, &e)
|
||||
}
|
||||
return events, nil
|
||||
}
|
||||
|
||||
// Pause pauses processing of tasks from the given queue.
|
||||
func (r *RDB) Pause(qname string) error {
|
||||
key := base.PausedKey(qname)
|
||||
|
@@ -2983,6 +2983,103 @@ func TestListWorkers(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteListClearSchedulerEntries(t *testing.T) {
|
||||
r := setup(t)
|
||||
now := time.Now().UTC()
|
||||
schedulerID := "127.0.0.1:9876:abc123"
|
||||
|
||||
data := []*base.SchedulerEntry{
|
||||
&base.SchedulerEntry{
|
||||
Spec: "* * * * *",
|
||||
Type: "foo",
|
||||
Payload: nil,
|
||||
Opts: nil,
|
||||
Next: now.Add(5 * time.Hour),
|
||||
Prev: now.Add(-2 * time.Hour),
|
||||
},
|
||||
&base.SchedulerEntry{
|
||||
Spec: "@every 20m",
|
||||
Type: "bar",
|
||||
Payload: map[string]interface{}{"fiz": "baz"},
|
||||
Opts: nil,
|
||||
Next: now.Add(1 * time.Minute),
|
||||
Prev: now.Add(-19 * time.Minute),
|
||||
},
|
||||
}
|
||||
|
||||
if err := r.WriteSchedulerEntries(schedulerID, data, 30*time.Second); err != nil {
|
||||
t.Fatalf("WriteSchedulerEnties failed: %v", err)
|
||||
}
|
||||
entries, err := r.ListSchedulerEntries()
|
||||
if err != nil {
|
||||
t.Fatalf("ListSchedulerEntries failed: %v", err)
|
||||
}
|
||||
if diff := cmp.Diff(data, entries, h.SortSchedulerEntryOpt); diff != "" {
|
||||
t.Errorf("ListSchedulerEntries() = %v, want %v; (-want,+got)\n%s", entries, data, diff)
|
||||
}
|
||||
if err := r.ClearSchedulerEntries(schedulerID); err != nil {
|
||||
t.Fatalf("ClearSchedulerEntries failed: %v", err)
|
||||
}
|
||||
entries, err = r.ListSchedulerEntries()
|
||||
if err != nil {
|
||||
t.Fatalf("ListSchedulerEntries() after clear failed: %v", err)
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
t.Errorf("found %d entries, want 0 after clearing", len(entries))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSchedulerEnqueueEvents(t *testing.T) {
|
||||
r := setup(t)
|
||||
|
||||
var (
|
||||
now = time.Now()
|
||||
oneDayAgo = now.Add(-24 * time.Hour)
|
||||
oneHourAgo = now.Add(-1 * time.Hour)
|
||||
)
|
||||
|
||||
type event struct {
|
||||
entryID string
|
||||
taskID string
|
||||
enqueuedAt time.Time
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
entryID string
|
||||
events []*base.SchedulerEnqueueEvent
|
||||
}{
|
||||
{
|
||||
entryID: "entry123",
|
||||
events: []*base.SchedulerEnqueueEvent{{"task123", oneDayAgo}, {"task456", oneHourAgo}},
|
||||
},
|
||||
{
|
||||
entryID: "entry123",
|
||||
events: []*base.SchedulerEnqueueEvent{},
|
||||
},
|
||||
}
|
||||
|
||||
loop:
|
||||
for _, tc := range tests {
|
||||
h.FlushDB(t, r.client)
|
||||
|
||||
for _, e := range tc.events {
|
||||
if err := r.RecordSchedulerEnqueueEvent(tc.entryID, e); err != nil {
|
||||
t.Errorf("RecordSchedulerEnqueueEvent(%q, %v) failed: %v", tc.entryID, e, err)
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
got, err := r.ListSchedulerEnqueueEvents(tc.entryID)
|
||||
if err != nil {
|
||||
t.Errorf("ListSchedulerEnqueueEvents(%q) failed: %v", tc.entryID, err)
|
||||
continue
|
||||
}
|
||||
if diff := cmp.Diff(tc.events, got, h.SortSchedulerEnqueueEventOpt, timeCmpOpt); diff != "" {
|
||||
t.Errorf("ListSchedulerEnqueueEvent(%q) = %v, want %v; (-want,+got)\n%s",
|
||||
tc.entryID, got, tc.events, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPause(t *testing.T) {
|
||||
r := setup(t)
|
||||
|
||||
|
@@ -575,6 +575,45 @@ func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
|
||||
return clearServerStateCmd.Run(r.client, []string{skey, wkey}).Err()
|
||||
}
|
||||
|
||||
// KEYS[1] -> asynq:schedulers:{<schedulerID>}
|
||||
// ARGV[1] -> TTL in seconds
|
||||
// ARGV[2:] -> scheduler entries
|
||||
var writeSchedulerEntriesCmd = redis.NewScript(`
|
||||
redis.call("DEL", KEYS[1])
|
||||
for i = 2, #ARGV do
|
||||
redis.call("LPUSH", KEYS[1], ARGV[i])
|
||||
end
|
||||
redis.call("EXPIRE", KEYS[1], ARGV[1])
|
||||
return redis.status_reply("OK")`)
|
||||
|
||||
// WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl.
|
||||
func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
|
||||
args := []interface{}{ttl.Seconds()}
|
||||
for _, e := range entries {
|
||||
bytes, err := json.Marshal(e)
|
||||
if err != nil {
|
||||
continue // skip bad data
|
||||
}
|
||||
args = append(args, bytes)
|
||||
}
|
||||
exp := time.Now().Add(ttl).UTC()
|
||||
key := base.SchedulerEntriesKey(schedulerID)
|
||||
err := r.client.ZAdd(base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeSchedulerEntriesCmd.Run(r.client, []string{key}, args...).Err()
|
||||
}
|
||||
|
||||
// ClearSchedulerEntries deletes scheduler entries data from redis.
|
||||
func (r *RDB) ClearSchedulerEntries(scheduelrID string) error {
|
||||
key := base.SchedulerEntriesKey(scheduelrID)
|
||||
if err := r.client.ZRem(base.AllSchedulers, key).Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
return r.client.Del(key).Err()
|
||||
}
|
||||
|
||||
// CancelationPubSub returns a pubsub for cancelation messages.
|
||||
func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
|
||||
pubsub := r.client.Subscribe(base.CancelChannel)
|
||||
@@ -590,3 +629,26 @@ func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
|
||||
func (r *RDB) PublishCancelation(id string) error {
|
||||
return r.client.Publish(base.CancelChannel, id).Err()
|
||||
}
|
||||
|
||||
// KEYS[1] -> asynq:scheduler_history:<entryID>
|
||||
// ARGV[1] -> enqueued_at timestamp
|
||||
// ARGV[2] -> serialized SchedulerEnqueueEvent data
|
||||
// ARGV[3] -> max number of events to be persisted
|
||||
var recordSchedulerEnqueueEventCmd = redis.NewScript(`
|
||||
redis.call("ZADD", KEYS[1], ARGV[1], ARGV[2])
|
||||
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", ARGV[3])
|
||||
return redis.status_reply("OK")`)
|
||||
|
||||
// Maximum number of enqueue events to store per entry.
|
||||
const maxEvents = 10000
|
||||
|
||||
// RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
|
||||
func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
|
||||
key := base.SchedulerHistoryKey(entryID)
|
||||
data, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return recordSchedulerEnqueueEventCmd.Run(
|
||||
r.client, []string{key}, event.EnqueuedAt.Unix(), data, maxEvents).Err()
|
||||
}
|
||||
|
10  payload.go

@@ -44,6 +44,16 @@ func toInt(v interface{}) (int, error) {
    }
}

// String returns a string representation of payload data.
func (p Payload) String() string {
    return fmt.Sprint(p.data)
}

// MarshalJSON returns the JSON encoding of payload data.
func (p Payload) MarshalJSON() ([]byte, error) {
    return json.Marshal(p.data)
}

// GetString returns a string value if a string type is associated with
// the key, otherwise reports an error.
func (p Payload) GetString(key string) (string, error) {
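The two new `Payload` methods above exist mainly for debugging and JSON serialization. A short sketch of how they behave from user code; the task type and payload values are arbitrary.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hibiken/asynq"
)

func main() {
	t := asynq.NewTask("email:welcome", map[string]interface{}{
		"user_id": 42,
		"name":    "Ada",
	})

	// String() prints the underlying map via fmt.Sprint.
	fmt.Println(t.Payload.String())

	// MarshalJSON() makes Payload usable with encoding/json.
	b, err := json.Marshal(t.Payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```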
@@ -6,6 +6,7 @@ package asynq
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -645,3 +646,30 @@ func TestPayloadHas(t *testing.T) {
|
||||
t.Errorf("Payload.Has(%q) = true, want false", "name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPayloadDebuggingStrings(t *testing.T) {
|
||||
data := map[string]interface{}{
|
||||
"foo": 123,
|
||||
"bar": "hello",
|
||||
"baz": false,
|
||||
}
|
||||
payload := Payload{data: data}
|
||||
|
||||
if payload.String() != fmt.Sprint(data) {
|
||||
t.Errorf("Payload.String() = %q, want %q",
|
||||
payload.String(), fmt.Sprint(data))
|
||||
}
|
||||
|
||||
got, err := payload.MarshalJSON()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if diff := cmp.Diff(got, want); diff != "" {
|
||||
t.Errorf("Payload.MarhsalJSON() = %s, want %s; (-want,+got)\n%s",
|
||||
got, want, diff)
|
||||
}
|
||||
}
|
||||
|
33
processor.go
33
processor.go
@@ -88,22 +88,23 @@ func newProcessor(params processorParams) *processor {
|
||||
orderedQueues = sortByPriority(queues)
|
||||
}
|
||||
return &processor{
|
||||
logger: params.logger,
|
||||
broker: params.broker,
|
||||
queueConfig: queues,
|
||||
orderedQueues: orderedQueues,
|
||||
retryDelayFunc: params.retryDelayFunc,
|
||||
syncRequestCh: params.syncCh,
|
||||
cancelations: params.cancelations,
|
||||
errLogLimiter: rate.NewLimiter(rate.Every(3*time.Second), 1),
|
||||
sema: make(chan struct{}, params.concurrency),
|
||||
done: make(chan struct{}),
|
||||
quit: make(chan struct{}),
|
||||
abort: make(chan struct{}),
|
||||
errHandler: params.errHandler,
|
||||
handler: HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
|
||||
starting: params.starting,
|
||||
finished: params.finished,
|
||||
logger: params.logger,
|
||||
broker: params.broker,
|
||||
queueConfig: queues,
|
||||
orderedQueues: orderedQueues,
|
||||
retryDelayFunc: params.retryDelayFunc,
|
||||
syncRequestCh: params.syncCh,
|
||||
cancelations: params.cancelations,
|
||||
errLogLimiter: rate.NewLimiter(rate.Every(3*time.Second), 1),
|
||||
sema: make(chan struct{}, params.concurrency),
|
||||
done: make(chan struct{}),
|
||||
quit: make(chan struct{}),
|
||||
abort: make(chan struct{}),
|
||||
errHandler: params.errHandler,
|
||||
handler: HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
|
||||
shutdownTimeout: params.shutdownTimeout,
|
||||
starting: params.starting,
|
||||
finished: params.finished,
|
||||
}
|
||||
}
|
||||
|
||||
|
262
scheduler.go
262
scheduler.go
@@ -5,69 +5,235 @@
|
||||
package asynq
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hibiken/asynq/internal/base"
|
||||
"github.com/hibiken/asynq/internal/log"
|
||||
"github.com/hibiken/asynq/internal/rdb"
|
||||
"github.com/robfig/cron/v3"
|
||||
)
|
||||
|
||||
type scheduler struct {
|
||||
logger *log.Logger
|
||||
broker base.Broker
|
||||
|
||||
// channel to communicate back to the long running "scheduler" goroutine.
|
||||
done chan struct{}
|
||||
|
||||
// list of queue names to check and enqueue.
|
||||
queues []string
|
||||
|
||||
// poll interval on average
|
||||
avgInterval time.Duration
|
||||
// A Scheduler kicks off tasks at regular intervals based on the user defined schedule.
|
||||
type Scheduler struct {
|
||||
id string
|
||||
status *base.ServerStatus
|
||||
logger *log.Logger
|
||||
client *Client
|
||||
rdb *rdb.RDB
|
||||
cron *cron.Cron
|
||||
location *time.Location
|
||||
done chan struct{}
|
||||
wg sync.WaitGroup
|
||||
errHandler func(task *Task, opts []Option, err error)
|
||||
}
|
||||
|
||||
type schedulerParams struct {
|
||||
logger *log.Logger
|
||||
broker base.Broker
|
||||
queues []string
|
||||
interval time.Duration
|
||||
}
|
||||
// NewScheduler returns a new Scheduler instance given the redis connection option.
|
||||
// The parameter opts is optional; if opts is nil, default values are used.
|
||||
func NewScheduler(r RedisConnOpt, opts *SchedulerOpts) *Scheduler {
|
||||
if opts == nil {
|
||||
opts = &SchedulerOpts{}
|
||||
}
|
||||
|
||||
func newScheduler(params schedulerParams) *scheduler {
|
||||
return &scheduler{
|
||||
logger: params.logger,
|
||||
broker: params.broker,
|
||||
done: make(chan struct{}),
|
||||
queues: params.queues,
|
||||
avgInterval: params.interval,
|
||||
logger := log.NewLogger(opts.Logger)
|
||||
loglevel := opts.LogLevel
|
||||
if loglevel == level_unspecified {
|
||||
loglevel = InfoLevel
|
||||
}
|
||||
logger.SetLevel(toInternalLogLevel(loglevel))
|
||||
|
||||
loc := opts.Location
|
||||
if loc == nil {
|
||||
loc = time.UTC
|
||||
}
|
||||
|
||||
return &Scheduler{
|
||||
id: generateSchedulerID(),
|
||||
status: base.NewServerStatus(base.StatusIdle),
|
||||
logger: logger,
|
||||
client: NewClient(r),
|
||||
rdb: rdb.NewRDB(createRedisClient(r)),
|
||||
cron: cron.New(cron.WithLocation(loc)),
|
||||
location: loc,
|
||||
done: make(chan struct{}),
|
||||
errHandler: opts.EnqueueErrorHandler,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *scheduler) terminate() {
|
||||
s.logger.Debug("Scheduler shutting down...")
|
||||
// Signal the scheduler goroutine to stop polling.
|
||||
s.done <- struct{}{}
|
||||
func generateSchedulerID() string {
|
||||
host, err := os.Hostname()
|
||||
if err != nil {
|
||||
host = "unknown-host"
|
||||
}
|
||||
return fmt.Sprintf("%s:%d:%v", host, os.Getpid(), uuid.New())
|
||||
}
|
||||
|
||||
// start starts the "scheduler" goroutine.
|
||||
func (s *scheduler) start(wg *sync.WaitGroup) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-s.done:
|
||||
s.logger.Debug("Scheduler done")
|
||||
return
|
||||
case <-time.After(s.avgInterval):
|
||||
s.exec()
|
||||
}
|
||||
// SchedulerOpts specifies scheduler options.
|
||||
type SchedulerOpts struct {
|
||||
// Logger specifies the logger used by the scheduler instance.
|
||||
//
|
||||
// If unset, the default logger is used.
|
||||
Logger Logger
|
||||
|
||||
// LogLevel specifies the minimum log level to enable.
|
||||
//
|
||||
// If unset, InfoLevel is used by default.
|
||||
LogLevel LogLevel
|
||||
|
||||
// Location specifies the time zone location.
|
||||
//
|
||||
// If unset, the UTC time zone (time.UTC) is used.
|
||||
Location *time.Location
|
||||
|
||||
// EnqueueErrorHandler gets called when scheduler cannot enqueue a registered task
|
||||
// due to an error.
|
||||
EnqueueErrorHandler func(task *Task, opts []Option, err error)
|
||||
}
|
||||
|
||||
// enqueueJob encapsulates the job of enqueuing a task and recording the event.
|
||||
type enqueueJob struct {
|
||||
id uuid.UUID
|
||||
cronspec string
|
||||
task *Task
|
||||
opts []Option
|
||||
location *time.Location
|
||||
logger *log.Logger
|
||||
client *Client
|
||||
rdb *rdb.RDB
|
||||
errHandler func(task *Task, opts []Option, err error)
|
||||
}
|
||||
|
||||
func (j *enqueueJob) Run() {
|
||||
res, err := j.client.Enqueue(j.task, j.opts...)
|
||||
if err != nil {
|
||||
j.logger.Errorf("scheduler could not enqueue a task %+v: %v", j.task, err)
|
||||
if j.errHandler != nil {
|
||||
j.errHandler(j.task, j.opts, err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (s *scheduler) exec() {
|
||||
if err := s.broker.CheckAndEnqueue(s.queues...); err != nil {
|
||||
s.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
|
||||
return
|
||||
}
|
||||
j.logger.Infof("scheduler enqueued a task: %+v", res)
|
||||
event := &base.SchedulerEnqueueEvent{
|
||||
TaskID: res.ID,
|
||||
EnqueuedAt: res.EnqueuedAt.In(j.location),
|
||||
}
|
||||
err = j.rdb.RecordSchedulerEnqueueEvent(j.id.String(), event)
|
||||
if err != nil {
|
||||
j.logger.Errorf("scheduler could not record enqueue event of enqueued task %+v: %v", j.task, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Register registers a task to be enqueued on the given schedule specified by the cronspec.
|
||||
// It returns an ID of the newly registered entry.
|
||||
func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entryID string, err error) {
|
||||
job := &enqueueJob{
|
||||
id: uuid.New(),
|
||||
cronspec: cronspec,
|
||||
task: task,
|
||||
opts: opts,
|
||||
location: s.location,
|
||||
client: s.client,
|
||||
rdb: s.rdb,
|
||||
logger: s.logger,
|
||||
errHandler: s.errHandler,
|
||||
}
|
||||
if _, err = s.cron.AddJob(cronspec, job); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return job.id.String(), nil
|
||||
}
|
||||
|
||||
// Run starts the scheduler until an os signal to exit the program is received.
|
||||
// It returns an error if scheduler is already running or has been stopped.
|
||||
func (s *Scheduler) Run() error {
|
||||
if err := s.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
s.waitForSignals()
|
||||
return s.Stop()
|
||||
}
|
||||
|
||||
// Start starts the scheduler.
|
||||
// It returns an error if the scheduler is already running or has been stopped.
|
||||
func (s *Scheduler) Start() error {
|
||||
switch s.status.Get() {
|
||||
case base.StatusRunning:
|
||||
return fmt.Errorf("asynq: the scheduler is already running")
|
||||
case base.StatusStopped:
|
||||
return fmt.Errorf("asynq: the scheduler has already been stopped")
|
||||
}
|
||||
s.logger.Info("Scheduler starting")
|
||||
s.logger.Infof("Scheduler timezone is set to %v", s.location)
|
||||
s.cron.Start()
|
||||
s.wg.Add(1)
|
||||
go s.runHeartbeater()
|
||||
s.status.Set(base.StatusRunning)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the scheduler.
|
||||
// It returns an error if the scheduler is not currently running.
|
||||
func (s *Scheduler) Stop() error {
|
||||
if s.status.Get() != base.StatusRunning {
|
||||
return fmt.Errorf("asynq: the scheduler is not running")
|
||||
}
|
||||
s.logger.Info("Scheduler shutting down")
|
||||
close(s.done) // signal heartbeater to stop
|
||||
ctx := s.cron.Stop()
|
||||
<-ctx.Done()
|
||||
s.wg.Wait()
|
||||
|
||||
s.client.Close()
|
||||
s.rdb.Close()
|
||||
s.status.Set(base.StatusStopped)
|
||||
s.logger.Info("Scheduler stopped")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scheduler) runHeartbeater() {
|
||||
defer s.wg.Done()
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
for {
|
||||
select {
|
||||
case <-s.done:
|
||||
s.logger.Debugf("Scheduler heatbeater shutting down")
|
||||
s.rdb.ClearSchedulerEntries(s.id)
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.beat()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// beat writes a snapshot of entries to redis.
|
||||
func (s *Scheduler) beat() {
|
||||
var entries []*base.SchedulerEntry
|
||||
for _, entry := range s.cron.Entries() {
|
||||
job := entry.Job.(*enqueueJob)
|
||||
e := &base.SchedulerEntry{
|
||||
ID: job.id.String(),
|
||||
Spec: job.cronspec,
|
||||
Type: job.task.Type,
|
||||
Payload: job.task.Payload.data,
|
||||
Opts: stringifyOptions(job.opts),
|
||||
Next: entry.Next,
|
||||
Prev: entry.Prev,
|
||||
}
|
||||
entries = append(entries, e)
|
||||
}
|
||||
s.logger.Debugf("Writing entries %v", entries)
|
||||
if err := s.rdb.WriteSchedulerEntries(s.id, entries, 5*time.Second); err != nil {
|
||||
s.logger.Warnf("Scheduler could not write heartbeat data: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func stringifyOptions(opts []Option) []string {
|
||||
var res []string
|
||||
for _, opt := range opts {
|
||||
res = append(res, opt.String())
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
@@ -10,128 +10,109 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
h "github.com/hibiken/asynq/internal/asynqtest"
|
||||
"github.com/hibiken/asynq/internal/asynqtest"
|
||||
"github.com/hibiken/asynq/internal/base"
|
||||
"github.com/hibiken/asynq/internal/rdb"
|
||||
)
|
||||
|
||||
func TestScheduler(t *testing.T) {
|
||||
r := setup(t)
|
||||
defer r.Close()
|
||||
rdbClient := rdb.NewRDB(r)
|
||||
const pollInterval = time.Second
|
||||
s := newScheduler(schedulerParams{
|
||||
logger: testLogger,
|
||||
broker: rdbClient,
|
||||
queues: []string{"default", "critical"},
|
||||
interval: pollInterval,
|
||||
})
|
||||
t1 := h.NewTaskMessageWithQueue("gen_thumbnail", nil, "default")
|
||||
t2 := h.NewTaskMessageWithQueue("send_email", nil, "critical")
|
||||
t3 := h.NewTaskMessageWithQueue("reindex", nil, "default")
|
||||
t4 := h.NewTaskMessageWithQueue("sync", nil, "critical")
|
||||
now := time.Now()
|
||||
|
||||
tests := []struct {
|
||||
initScheduled map[string][]base.Z // scheduled queue initial state
|
||||
initRetry map[string][]base.Z // retry queue initial state
|
||||
initPending map[string][]*base.TaskMessage // default queue initial state
|
||||
wait time.Duration // wait duration before checking for final state
|
||||
wantScheduled map[string][]*base.TaskMessage // schedule queue final state
|
||||
wantRetry map[string][]*base.TaskMessage // retry queue final state
|
||||
wantPending map[string][]*base.TaskMessage // default queue final state
|
||||
cronspec string
|
||||
task *Task
|
||||
opts []Option
|
||||
wait time.Duration
|
||||
queue string
|
||||
want []*base.TaskMessage
|
||||
}{
|
||||
{
|
||||
initScheduled: map[string][]base.Z{
|
||||
"default": {{Message: t1, Score: now.Add(time.Hour).Unix()}},
|
||||
"critical": {{Message: t2, Score: now.Add(-2 * time.Second).Unix()}},
|
||||
},
|
||||
initRetry: map[string][]base.Z{
|
||||
"default": {{Message: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()}},
|
||||
"critical": {},
|
||||
},
|
||||
initPending: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {t4},
|
||||
},
|
||||
wait: pollInterval * 2,
|
||||
wantScheduled: map[string][]*base.TaskMessage{
|
||||
"default": {t1},
|
||||
"critical": {},
|
||||
},
|
||||
wantRetry: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {},
|
||||
},
|
||||
wantPending: map[string][]*base.TaskMessage{
|
||||
"default": {t3},
|
||||
"critical": {t2, t4},
|
||||
},
|
||||
},
|
||||
{
|
||||
initScheduled: map[string][]base.Z{
|
||||
"default": {
|
||||
{Message: t1, Score: now.Unix()},
|
||||
{Message: t3, Score: now.Add(-500 * time.Millisecond).Unix()},
|
||||
cronspec: "@every 3s",
|
||||
task: NewTask("task1", nil),
|
||||
opts: []Option{MaxRetry(10)},
|
||||
wait: 10 * time.Second,
|
||||
queue: "default",
|
||||
want: []*base.TaskMessage{
|
||||
{
|
||||
Type: "task1",
|
||||
Payload: nil,
|
||||
Retry: 10,
|
||||
Timeout: int64(defaultTimeout.Seconds()),
|
||||
Queue: "default",
|
||||
},
|
||||
"critical": {
|
||||
{Message: t2, Score: now.Add(-2 * time.Second).Unix()},
|
||||
{
|
||||
Type: "task1",
|
||||
Payload: nil,
|
||||
Retry: 10,
|
||||
Timeout: int64(defaultTimeout.Seconds()),
|
||||
Queue: "default",
|
||||
},
|
||||
{
|
||||
Type: "task1",
|
||||
Payload: nil,
|
||||
Retry: 10,
|
||||
Timeout: int64(defaultTimeout.Seconds()),
|
||||
Queue: "default",
|
||||
},
|
||||
},
|
||||
initRetry: map[string][]base.Z{
|
||||
"default": {},
|
||||
"critical": {},
|
||||
},
|
||||
initPending: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {t4},
|
||||
},
|
||||
wait: pollInterval * 2,
|
||||
wantScheduled: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {},
|
||||
},
|
||||
wantRetry: map[string][]*base.TaskMessage{
|
||||
"default": {},
|
||||
"critical": {},
|
||||
},
|
||||
wantPending: map[string][]*base.TaskMessage{
|
||||
"default": {t1, t3},
|
||||
"critical": {t2, t4},
|
||||
},
|
||||
},
|
||||
}
|
||||

r := setup(t)

for _, tc := range tests {
h.FlushDB(t, r) // clean up db before each test case.
h.SeedAllScheduledQueues(t, r, tc.initScheduled) // initialize scheduled queue
h.SeedAllRetryQueues(t, r, tc.initRetry) // initialize retry queue
h.SeedAllPendingQueues(t, r, tc.initPending) // initialize default queue
scheduler := NewScheduler(getRedisConnOpt(t), nil)
if _, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...); err != nil {
t.Fatal(err)
}

var wg sync.WaitGroup
s.start(&wg)
if err := scheduler.Start(); err != nil {
t.Fatal(err)
}
time.Sleep(tc.wait)
s.terminate()

for qname, want := range tc.wantScheduled {
gotScheduled := h.GetScheduledMessages(t, r, qname)
if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.ScheduledKey(qname), diff)
}
if err := scheduler.Stop(); err != nil {
t.Fatal(err)
}

for qname, want := range tc.wantRetry {
gotRetry := h.GetRetryMessages(t, r, qname)
if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.RetryKey(qname), diff)
}
}

for qname, want := range tc.wantPending {
gotPending := h.GetPendingMessages(t, r, qname)
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
t.Errorf("mismatch found in %q after running scheduler: (-want, +got)\n%s", base.QueueKey(qname), diff)
}
got := asynqtest.GetPendingMessages(t, r, tc.queue)
if diff := cmp.Diff(tc.want, got, asynqtest.IgnoreIDOpt); diff != "" {
t.Errorf("mismatch found in queue %q: (-want,+got)\n%s", tc.queue, diff)
}
}
}

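For orientation, a minimal sketch of the public Scheduler API exercised by the test above; it is not part of this diff, and the Redis address, cron spec, task type, and sleep duration are illustrative placeholders.

// Minimal sketch of registering and running a periodic task with the Scheduler.
package main

import (
    "log"
    "time"

    "github.com/hibiken/asynq"
)

func main() {
    scheduler := asynq.NewScheduler(asynq.RedisClientOpt{Addr: "localhost:6379"}, nil)

    // Register the task to be enqueued according to the cron spec.
    if _, err := scheduler.Register("@every 3s", asynq.NewTask("task1", nil), asynq.MaxRetry(10)); err != nil {
        log.Fatal(err)
    }

    if err := scheduler.Start(); err != nil {
        log.Fatal(err)
    }
    time.Sleep(10 * time.Second) // let a few enqueues happen
    if err := scheduler.Stop(); err != nil {
        log.Fatal(err)
    }
}
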
func TestSchedulerWhenRedisDown(t *testing.T) {
var (
mu sync.Mutex
counter int
)
errorHandler := func(task *Task, opts []Option, err error) {
mu.Lock()
counter++
mu.Unlock()
}

// Connect to non-existent redis instance to simulate a redis server being down.
scheduler := NewScheduler(
RedisClientOpt{Addr: ":9876"},
&SchedulerOpts{EnqueueErrorHandler: errorHandler},
)

task := NewTask("test", nil)

if _, err := scheduler.Register("@every 3s", task); err != nil {
t.Fatal(err)
}

if err := scheduler.Start(); err != nil {
t.Fatal(err)
}
// Scheduler should attempt to enqueue the task three times (every 3s).
time.Sleep(10 * time.Second)
if err := scheduler.Stop(); err != nil {
t.Fatal(err)
}

mu.Lock()
if counter != 3 {
t.Errorf("EnqueueErrorHandler was called %d times, want 3", counter)
}
mu.Unlock()
}

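The test above also pins down the shape of the new EnqueueErrorHandler hook. A minimal sketch of wiring it up in application code; the Redis address and the log call are placeholders, and the usual "log" and asynq imports are assumed.

scheduler := asynq.NewScheduler(
    asynq.RedisClientOpt{Addr: "localhost:6379"},
    &asynq.SchedulerOpts{
        // Invoked whenever the scheduler fails to enqueue a registered task.
        EnqueueErrorHandler: func(task *asynq.Task, opts []asynq.Option, err error) {
            log.Printf("could not enqueue task %q: %v", task.Type, err)
        },
    },
)
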
39
server.go
@@ -41,7 +41,7 @@ type Server struct {

// wait group to wait for all goroutines to finish.
wg sync.WaitGroup
scheduler *scheduler
forwarder *forwarder
processor *processor
syncer *syncer
heartbeater *heartbeater
@@ -75,11 +75,13 @@ type Config struct {
// Priority is treated as follows to avoid starving low priority queues.
//
// Example:
// Queues: map[string]int{
//     "critical": 6,
//     "default": 3,
//     "low": 1,
// }
//
// Queues: map[string]int{
//     "critical": 6,
//     "default": 3,
//     "low": 1,
// }
//
// With the above config and given that all queues are not empty, the tasks
// in "critical", "default", "low" should be processed 60%, 30%, 10% of
// the time respectively.
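A minimal sketch (not part of the diff) of a server using the weighted-queue configuration described in the comment above; the address, concurrency value, and ServeMux wiring are illustrative.

// Server with weighted queues: "critical"/"default"/"low" processed
// roughly 60%/30%/10% of the time when all queues are non-empty.
package main

import (
    "log"

    "github.com/hibiken/asynq"
)

func main() {
    srv := asynq.NewServer(
        asynq.RedisClientOpt{Addr: "localhost:6379"},
        asynq.Config{
            Concurrency: 10,
            Queues: map[string]int{
                "critical": 6,
                "default":  3,
                "low":      1,
            },
        },
    )

    mux := asynq.NewServeMux()
    // ... register task handlers on mux ...

    if err := srv.Run(mux); err != nil {
        log.Fatal(err)
    }
}
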
@@ -99,14 +101,17 @@ type Config struct {
// HandleError is invoked only if the task handler returns a non-nil error.
//
// Example:
// func reportError(task *asynq.Task, err error, retried, maxRetry int) {
//     if retried >= maxRetry {
//         err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
//     }
//     errorReportingService.Notify(err)
// })
//
// ErrorHandler: asynq.ErrorHandlerFunc(reportError)
// func reportError(ctx context.Context, task *asynq.Task, err error) {
//     retried, _ := asynq.GetRetryCount(ctx)
//     maxRetry, _ := asynq.GetMaxRetry(ctx)
//     if retried >= maxRetry {
//         err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
//     }
//     errorReportingService.Notify(err)
// })
//
// ErrorHandler: asynq.ErrorHandlerFunc(reportError)
ErrorHandler ErrorHandler

// Logger specifies the logger used by the server instance.
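A compilable variant of the new context-based ErrorHandler documented above; the log call stands in for a real error-reporting service, and the usual "context", "fmt", and "log" imports are assumed.

cfg := asynq.Config{
    ErrorHandler: asynq.ErrorHandlerFunc(func(ctx context.Context, task *asynq.Task, err error) {
        retried, _ := asynq.GetRetryCount(ctx)
        maxRetry, _ := asynq.GetMaxRetry(ctx)
        if retried >= maxRetry {
            err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
        }
        log.Printf("task error: %v", err) // placeholder for errorReportingService.Notify(err)
    }),
}
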
@@ -328,7 +333,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
starting: starting,
finished: finished,
})
scheduler := newScheduler(schedulerParams{
forwarder := newForwarder(forwarderParams{
logger: logger,
broker: rdb,
queues: qnames,
@@ -370,7 +375,7 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
logger: logger,
broker: rdb,
status: status,
scheduler: scheduler,
forwarder: forwarder,
processor: processor,
syncer: syncer,
heartbeater: heartbeater,
@@ -448,7 +453,7 @@ func (srv *Server) Start(handler Handler) error {
srv.subscriber.start(&srv.wg)
srv.syncer.start(&srv.wg)
srv.recoverer.start(&srv.wg)
srv.scheduler.start(&srv.wg)
srv.forwarder.start(&srv.wg)
srv.processor.start(&srv.wg)
return nil
}
@@ -469,7 +474,7 @@ func (srv *Server) Stop() {
// Sender goroutines should be terminated before the receiver goroutines.
// processor -> syncer (via syncCh)
// processor -> heartbeater (via starting, finished channels)
srv.scheduler.terminate()
srv.forwarder.terminate()
srv.processor.terminate()
srv.recoverer.terminate()
srv.syncer.terminate()

@@ -127,7 +127,7 @@ func TestServerWithRedisDown(t *testing.T) {
testBroker := testbroker.NewTestBroker(r)
srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel})
srv.broker = testBroker
srv.scheduler.broker = testBroker
srv.forwarder.broker = testBroker
srv.heartbeater.broker = testBroker
srv.processor.broker = testBroker
srv.subscriber.broker = testBroker
@@ -160,7 +160,7 @@ func TestServerWithFlakyBroker(t *testing.T) {
redisConnOpt := getRedisConnOpt(t)
srv := NewServer(redisConnOpt, Config{LogLevel: testLogLevel})
srv.broker = testBroker
srv.scheduler.broker = testBroker
srv.forwarder.broker = testBroker
srv.heartbeater.broker = testBroker
srv.processor.broker = testBroker
srv.subscriber.broker = testBroker

@@ -28,3 +28,10 @@ func (srv *Server) waitForSignals() {
break
}
}

func (s *Scheduler) waitForSignals() {
s.logger.Info("Send signal TERM or INT to stop the scheduler")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, unix.SIGTERM, unix.SIGINT)
<-sigs
}

@@ -20,3 +20,10 @@ func (srv *Server) waitForSignals() {
signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
<-sigs
}

func (s *Scheduler) waitForSignals() {
s.logger.Info("Send signal TERM or INT to stop the scheduler")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
<-sigs
}

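The handlers above give the Scheduler the same TERM/INT behavior as the server. A rough application-level equivalent using only Start/Stop and the standard library; it assumes a constructed scheduler and the "log", "os", "os/signal", and "syscall" imports, and mirrors the unix signal set above.

if err := scheduler.Start(); err != nil {
    log.Fatal(err)
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT)
<-sigs // block until TERM or INT
if err := scheduler.Stop(); err != nil {
    log.Fatal(err)
}
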
122
tools/asynq/cmd/cron.go
Normal file
@@ -0,0 +1,122 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package cmd

import (
    "fmt"
    "io"
    "os"
    "sort"
    "time"

    "github.com/spf13/cobra"
)

func init() {
    rootCmd.AddCommand(cronCmd)
    cronCmd.AddCommand(cronListCmd)
    cronCmd.AddCommand(cronHistoryCmd)
}

var cronCmd = &cobra.Command{
    Use:   "cron",
    Short: "Manage cron",
}

var cronListCmd = &cobra.Command{
    Use:   "ls",
    Short: "List cron entries",
    Run:   cronList,
}

var cronHistoryCmd = &cobra.Command{
    Use:   "history",
    Short: "Show history of each cron task",
    Args:  cobra.MinimumNArgs(1),
    Run:   cronHistory,
}

func cronList(cmd *cobra.Command, args []string) {
    r := createRDB()

    entries, err := r.ListSchedulerEntries()
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    if len(entries) == 0 {
        fmt.Println("No scheduler entries")
        return
    }

    // Sort entries by spec.
    sort.Slice(entries, func(i, j int) bool {
        x, y := entries[i], entries[j]
        return x.Spec < y.Spec
    })

    cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
    printRows := func(w io.Writer, tmpl string) {
        for _, e := range entries {
            fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Type, e.Payload, e.Opts,
                nextEnqueue(e.Next), prevEnqueue(e.Prev))
        }
    }
    printTable(cols, printRows)
}

// Returns a string describing when the next enqueue will happen.
func nextEnqueue(nextEnqueueAt time.Time) string {
    d := nextEnqueueAt.Sub(time.Now()).Round(time.Second)
    if d < 0 {
        return "Now"
    }
    return fmt.Sprintf("In %v", d)
}

// Returns a string describing when the previous enqueue was.
func prevEnqueue(prevEnqueuedAt time.Time) string {
    if prevEnqueuedAt.IsZero() {
        return "N/A"
    }
    return fmt.Sprintf("%v ago", time.Since(prevEnqueuedAt).Round(time.Second))
}

// TODO: Paginate the result set.
func cronHistory(cmd *cobra.Command, args []string) {
    r := createRDB()
    for i, entryID := range args {
        if i > 0 {
            fmt.Printf("\n%s\n", separator)
        }
        fmt.Println()

        fmt.Printf("Entry: %s\n\n", entryID)

        events, err := r.ListSchedulerEnqueueEvents(entryID)
        if err != nil {
            fmt.Printf("error: %v\n", err)
            continue
        }
        if len(events) == 0 {
            fmt.Printf("No scheduler enqueue events found for entry: %s\n", entryID)
            continue
        }

        // Sort entries by enqueuedAt timestamp.
        sort.Slice(events, func(i, j int) bool {
            x, y := events[i], events[j]
            return x.EnqueuedAt.Unix() > y.EnqueuedAt.Unix()
        })

        cols := []string{"TaskID", "EnqueuedAt"}
        printRows := func(w io.Writer, tmpl string) {
            for _, e := range events {
                fmt.Fprintf(w, tmpl, e.TaskID, e.EnqueuedAt)
            }
        }
        printTable(cols, printRows)
    }
}
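Both commands above delegate rendering to printTable, whose real definition lives in root.go and may differ. A plausible sketch of such a helper using text/tabwriter, to show how the cols/printRows/tmpl pattern fits together; the padding values are arbitrary and the usual "fmt", "io", "os", "strings", and "text/tabwriter" imports are assumed.

// Hypothetical printTable: prints headers, then delegates row printing to the
// caller-supplied closure using the same tab-separated format string.
func printTable(cols []string, printRows func(w io.Writer, tmpl string)) {
    format := strings.Repeat("%v\t", len(cols)) + "\n"
    tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
    var headers []interface{}
    for _, c := range cols {
        headers = append(headers, c)
    }
    fmt.Fprintf(tw, format, headers...)
    printRows(tw, format)
    tw.Flush() // flush errors ignored for brevity
}
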
@@ -5,6 +5,7 @@
package cmd

import (
"crypto/tls"
"fmt"
"io"
"os"
@@ -31,6 +32,7 @@ var (

useRedisCluster bool
clusterAddrs string
tlsServerName string
)

// rootCmd represents the base command when called without any subcommands
@@ -74,12 +76,15 @@ func init() {
rootCmd.PersistentFlags().StringVar(&clusterAddrs, "cluster_addrs",
"127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003,127.0.0.1:7004,127.0.0.1:7005",
"list of comma-separated redis server addresses")
rootCmd.PersistentFlags().StringVar(&tlsServerName, "tls_server",
"", "server name for TLS validation")
// Bind flags with config.
viper.BindPFlag("uri", rootCmd.PersistentFlags().Lookup("uri"))
viper.BindPFlag("db", rootCmd.PersistentFlags().Lookup("db"))
viper.BindPFlag("password", rootCmd.PersistentFlags().Lookup("password"))
viper.BindPFlag("cluster", rootCmd.PersistentFlags().Lookup("cluster"))
viper.BindPFlag("cluster_addrs", rootCmd.PersistentFlags().Lookup("cluster_addrs"))
viper.BindPFlag("tls_server", rootCmd.PersistentFlags().Lookup("tls_server"))
}

// initConfig reads in config file and ENV variables if set.
@@ -114,14 +119,16 @@ func createRDB() *rdb.RDB {
if useRedisCluster {
addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
c = redis.NewClusterClient(&redis.ClusterOptions{
Addrs: addrs,
Password: viper.GetString("password"),
Addrs: addrs,
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
})
} else {
c = redis.NewClient(&redis.Options{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
})
}
return rdb.NewRDB(c)
@@ -133,19 +140,29 @@ func createInspector() *asynq.Inspector {
if useRedisCluster {
addrs := strings.Split(viper.GetString("cluster_addrs"), ",")
connOpt = asynq.RedisClusterClientOpt{
Addrs: addrs,
Password: viper.GetString("password"),
Addrs: addrs,
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
}
} else {
connOpt = asynq.RedisClientOpt{
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
Addr: viper.GetString("uri"),
DB: viper.GetInt("db"),
Password: viper.GetString("password"),
TLSConfig: getTLSConfig(),
}
}
return asynq.NewInspector(connOpt)
}

func getTLSConfig() *tls.Config {
tlsServer := viper.GetString("tls_server")
if tlsServer == "" {
return nil
}
return &tls.Config{ServerName: tlsServer}
}

// printTable is a helper function to print data in table format.
//
// cols is a list of headers and printRow specifies how to print rows.

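The new --tls_server flag above only sets tls.Config.ServerName; the same connection options are available programmatically, as the createInspector change shows. A sketch with placeholder host name and environment variable, assuming the "crypto/tls" and "os" imports alongside asynq.

connOpt := asynq.RedisClientOpt{
    Addr:      "redis.example.com:6379",
    Password:  os.Getenv("REDIS_PASSWORD"),
    TLSConfig: &tls.Config{ServerName: "redis.example.com"},
}
inspector := asynq.NewInspector(connOpt)
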
@@ -108,6 +108,8 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=