Mirror of https://github.com/hibiken/asynq.git

Compare commits (16 commits):

8bf5917cd9
7f30fa2bb6
ade6e61f51
a2abeedaa0
81bb52b08c
bc2a7635a0
f65d408bf9
4749b4bbfc
06c4a1c7f8
8af4cbad51
4e800a7f68
d6a5c84dc6
363cfedb49
4595bd41c3
e236d55477
a38f628f3b
@@ -5,6 +5,7 @@ git:
 go: [1.13.x, 1.14.x]
 script:
 - go test -race -v -coverprofile=coverage.txt -covermode=atomic ./...
+- go test -run=XXX -bench=. -loglevel=debug ./...
 services:
 - redis-server
 after_success:
@@ -3,13 +3,16 @@ if [ "${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" != "master" ]; then
   cd ${TRAVIS_BUILD_DIR}/.. && \
   git clone ${REMOTE_URL} "${TRAVIS_REPO_SLUG}-bench" && \
   cd "${TRAVIS_REPO_SLUG}-bench" && \

   # Benchmark master
   git checkout master && \
   go test -run=XXX -bench=. ./... > master.txt && \

   # Benchmark feature branch
   git checkout ${TRAVIS_COMMIT} && \
   go test -run=XXX -bench=. ./... > feature.txt && \
-  go get -u golang.org/x/tools/cmd/benchcmp && \

   # compare two benchmarks
+  go get -u golang.org/x/tools/cmd/benchcmp && \
   benchcmp master.txt feature.txt;
 fi
CHANGELOG.md (19 changed lines)

@@ -7,6 +7,25 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased]

+## [0.9.4] - 2020-06-13
+
+### Fixed
+
+- Fixes issue of same tasks processed by more than one worker (https://github.com/hibiken/asynq/issues/90).
+
+## [0.9.3] - 2020-06-12
+
+### Fixed
+
+- Fixes the JSON number overflow issue (https://github.com/hibiken/asynq/issues/166).
+
+
+## [0.9.2] - 2020-06-08
+
+### Added
+
+- The `pause` and `unpause` commands were added to the CLI. See README for the CLI for details.
+
 ## [0.9.1] - 2020-05-29

 ### Added
@@ -40,6 +40,7 @@ A system can consist of multiple worker servers and brokers, giving way to high
 - De-duplication of tasks using [unique option](https://github.com/hibiken/asynq/wiki/Unique-Tasks)
 - Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)
 - [Flexible handler interface with support for middlewares](https://github.com/hibiken/asynq/wiki/Handler-Deep-Dive)
+- [Ability to pause queue](/tools/asynq/README.md#pause) to stop processing tasks from the queue
 - [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for HA
 - [CLI](#command-line-tool) to inspect and remote-control queues and tasks

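For orientation only (not part of this compare): a minimal client/worker sketch assembled from the calls that appear in the benchmark changes further down (NewClient, NewTask, Enqueue, EnqueueAt, NewServer, Config, HandlerFunc, Start, Stop). The Redis address, task type, and payload below are made-up placeholders, and the exact API may differ in other asynq versions.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	redis := asynq.RedisClientOpt{Addr: "localhost:6379"}

	// Start a worker server with a trivial handler (mirrors the benchmark setup).
	srv := asynq.NewServer(redis, asynq.Config{Concurrency: 10})
	handler := func(ctx context.Context, t *asynq.Task) error {
		fmt.Printf("processing %s\n", t.Type)
		return nil
	}
	srv.Start(asynq.HandlerFunc(handler))
	defer srv.Stop()

	// Enqueue one task now and schedule another for a minute later.
	client := asynq.NewClient(redis)
	t := asynq.NewTask("email:welcome", map[string]interface{}{"user_id": 42})
	if err := client.Enqueue(t); err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	if err := client.EnqueueAt(time.Now().Add(time.Minute), t); err != nil {
		log.Fatalf("could not schedule task: %v", err)
	}

	time.Sleep(2 * time.Second) // give the worker a moment before shutting down
}
```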
@@ -7,7 +7,6 @@ package asynq
 import (
     "context"
     "fmt"
-    "math/rand"
     "sync"
     "testing"
     "time"
@@ -29,6 +28,7 @@ func BenchmarkEndToEndSimple(b *testing.B) {
         RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
             return time.Second
         },
+        LogLevel: testLogLevel,
     })
     // Create a bunch of tasks
     for i := 0; i < count; i++ {
@@ -60,7 +60,6 @@ func BenchmarkEndToEnd(b *testing.B) {
     const count = 100000
     for n := 0; n < b.N; n++ {
         b.StopTimer() // begin setup
-        rand.Seed(time.Now().UnixNano())
         setup(b)
         redis := &RedisClientOpt{
             Addr: redisAddr,
@@ -72,6 +71,7 @@ func BenchmarkEndToEnd(b *testing.B) {
         RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
             return time.Second
         },
+        LogLevel: testLogLevel,
     })
     // Create a bunch of tasks
     for i := 0; i < count; i++ {
@@ -90,8 +90,16 @@ func BenchmarkEndToEnd(b *testing.B) {
         var wg sync.WaitGroup
         wg.Add(count * 2)
         handler := func(ctx context.Context, t *Task) error {
-            // randomly fail 1% of tasks
-            if rand.Intn(100) == 1 {
+            n, err := t.Payload.GetInt("data")
+            if err != nil {
+                b.Logf("internal error: %v", err)
+            }
+            retried, ok := GetRetryCount(ctx)
+            if !ok {
+                b.Logf("internal error: %v", err)
+            }
+            // Fail 1% of tasks for the first attempt.
+            if retried == 0 && n%100 == 0 {
                 return fmt.Errorf(":(")
             }
             wg.Done()
@@ -131,6 +139,7 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
             "default": 3,
             "low":     1,
         },
+        LogLevel: testLogLevel,
     })
     // Create a bunch of tasks
     for i := 0; i < highCount; i++ {
@@ -168,3 +177,62 @@ func BenchmarkEndToEndMultipleQueues(b *testing.B) {
         b.StartTimer() // end teardown
     }
 }
+
+// E2E benchmark to check client enqueue operation performs correctly,
+// while server is busy processing tasks.
+func BenchmarkClientWhileServerRunning(b *testing.B) {
+    const count = 10000
+    for n := 0; n < b.N; n++ {
+        b.StopTimer() // begin setup
+        setup(b)
+        redis := &RedisClientOpt{
+            Addr: redisAddr,
+            DB:   redisDB,
+        }
+        client := NewClient(redis)
+        srv := NewServer(redis, Config{
+            Concurrency: 10,
+            RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
+                return time.Second
+            },
+            LogLevel: testLogLevel,
+        })
+        // Enqueue 10,000 tasks.
+        for i := 0; i < count; i++ {
+            t := NewTask(fmt.Sprintf("task%d", i), map[string]interface{}{"data": i})
+            if err := client.Enqueue(t); err != nil {
+                b.Fatalf("could not enqueue a task: %v", err)
+            }
+        }
+        // Schedule 10,000 tasks.
+        for i := 0; i < count; i++ {
+            t := NewTask(fmt.Sprintf("scheduled%d", i), map[string]interface{}{"data": i})
+            if err := client.EnqueueAt(time.Now().Add(time.Second), t); err != nil {
+                b.Fatalf("could not enqueue a task: %v", err)
+            }
+        }
+
+        handler := func(ctx context.Context, t *Task) error {
+            return nil
+        }
+        srv.Start(HandlerFunc(handler))
+
+        b.StartTimer() // end setup
+
+        b.Log("Starting enqueueing")
+        enqueued := 0
+        for enqueued < 100000 {
+            t := NewTask(fmt.Sprintf("enqueued%d", enqueued), map[string]interface{}{"data": enqueued})
+            if err := client.Enqueue(t); err != nil {
+                b.Logf("could not enqueue task %d: %v", enqueued, err)
+                continue
+            }
+            enqueued++
+        }
+        b.Logf("Finished enqueueing %d tasks", enqueued)
+
+        b.StopTimer() // begin teardown
+        srv.Stop()
+        b.StartTimer() // end teardown
+    }
+}
heartbeat.go (113 changed lines)

@@ -5,11 +5,13 @@
 package asynq

 import (
+    "os"
     "sync"
     "time"

     "github.com/hibiken/asynq/internal/base"
     "github.com/hibiken/asynq/internal/log"
+    "github.com/rs/xid"
 )

 // heartbeater is responsible for writing process info to redis periodically to
@@ -18,29 +20,69 @@ type heartbeater struct {
     logger *log.Logger
     broker base.Broker

-    ss *base.ServerState

     // channel to communicate back to the long running "heartbeater" goroutine.
     done chan struct{}

     // interval between heartbeats.
     interval time.Duration

+    // following fields are initialized at construction time and are immutable.
+    host           string
+    pid            int
+    serverID       string
+    concurrency    int
+    queues         map[string]int
+    strictPriority bool
+
+    // following fields are mutable and should be accessed only by the
+    // heartbeater goroutine. In other words, confine these variables
+    // to this goroutine only.
+    started time.Time
+    workers map[string]workerStat
+
+    // status is shared with other goroutine but is concurrency safe.
+    status *base.ServerStatus
+
+    // channels to receive updates on active workers.
+    starting <-chan *base.TaskMessage
+    finished <-chan *base.TaskMessage
 }

 type heartbeaterParams struct {
     logger *log.Logger
     broker base.Broker
-    serverState *base.ServerState
-    interval    time.Duration
+    interval       time.Duration
+    concurrency    int
+    queues         map[string]int
+    strictPriority bool
+    status         *base.ServerStatus
+    starting       <-chan *base.TaskMessage
+    finished       <-chan *base.TaskMessage
 }

 func newHeartbeater(params heartbeaterParams) *heartbeater {
+    host, err := os.Hostname()
+    if err != nil {
+        host = "unknown-host"
+    }
+
     return &heartbeater{
         logger:   params.logger,
         broker:   params.broker,
-        ss:       params.serverState,
         done:     make(chan struct{}),
         interval: params.interval,
+
+        host:           host,
+        pid:            os.Getpid(),
+        serverID:       xid.New().String(),
+        concurrency:    params.concurrency,
+        queues:         params.queues,
+        strictPriority: params.strictPriority,
+
+        status:   params.status,
+        workers:  make(map[string]workerStat),
+        starting: params.starting,
+        finished: params.finished,
     }
 }

@@ -50,31 +92,74 @@ func (h *heartbeater) terminate() {
     h.done <- struct{}{}
 }

+// A workerStat records the message a worker is working on
+// and the time the worker has started processing the message.
+type workerStat struct {
+    started time.Time
+    msg     *base.TaskMessage
+}
+
 func (h *heartbeater) start(wg *sync.WaitGroup) {
-    h.ss.SetStarted(time.Now())
-    h.ss.SetStatus(base.StatusRunning)
     wg.Add(1)
     go func() {
         defer wg.Done()
+
+        h.started = time.Now()
+
         h.beat()
+
+        timer := time.NewTimer(h.interval)
         for {
             select {
             case <-h.done:
-                h.broker.ClearServerState(h.ss)
+                h.broker.ClearServerState(h.host, h.pid, h.serverID)
                 h.logger.Debug("Heartbeater done")
+                timer.Stop()
                 return
-            case <-time.After(h.interval):
+            case <-timer.C:
                 h.beat()
+                timer.Reset(h.interval)
+
+            case msg := <-h.starting:
+                h.workers[msg.ID.String()] = workerStat{time.Now(), msg}
+
+            case msg := <-h.finished:
+                delete(h.workers, msg.ID.String())
             }
         }
     }()
 }

 func (h *heartbeater) beat() {
+    info := base.ServerInfo{
+        Host:              h.host,
+        PID:               h.pid,
+        ServerID:          h.serverID,
+        Concurrency:       h.concurrency,
+        Queues:            h.queues,
+        StrictPriority:    h.strictPriority,
+        Status:            h.status.String(),
+        Started:           h.started,
+        ActiveWorkerCount: len(h.workers),
+    }
+
+    var ws []*base.WorkerInfo
+    for id, stat := range h.workers {
+        ws = append(ws, &base.WorkerInfo{
+            Host:    h.host,
+            PID:     h.pid,
+            ID:      id,
+            Type:    stat.msg.Type,
+            Queue:   stat.msg.Queue,
+            Payload: stat.msg.Payload,
+            Started: stat.started,
+        })
+    }
+
     // Note: Set TTL to be long enough so that it won't expire before we write again
     // and short enough to expire quickly once the process is shut down or killed.
-    err := h.broker.WriteServerState(h.ss, h.interval*2)
-    if err != nil {
-        h.logger.Errorf("could not write heartbeat data: %v", err)
+    if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil {
+        h.logger.Errorf("could not write server state data: %v", err)
     }
 }
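The heartbeat.go change above removes the shared, mutex-guarded *base.ServerState*: worker activity now reaches the heartbeater over the `starting` and `finished` channels, and the `workers` map is mutated only inside the goroutine's `select` loop. A minimal, self-contained sketch of that ownership pattern follows (hypothetical names, not code from this compare); the trade is a shared lock for message passing, which makes the periodic snapshot in `beat()` race-free by construction.

```go
package main

import (
	"fmt"
	"time"
)

type taskMsg struct{ ID string }

// owner confines the active-worker map to a single goroutine; callers
// communicate by sending on the channels instead of taking a lock.
func owner(starting, finished <-chan taskMsg, done <-chan struct{}) {
	active := make(map[string]taskMsg) // mutated only in this goroutine
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case m := <-starting:
			active[m.ID] = m
		case m := <-finished:
			delete(active, m.ID)
		case <-ticker.C:
			fmt.Printf("heartbeat: %d active workers\n", len(active))
		case <-done:
			return
		}
	}
}

func main() {
	starting := make(chan taskMsg)
	finished := make(chan taskMsg)
	done := make(chan struct{})
	go owner(starting, finished, done)

	starting <- taskMsg{ID: "task-1"}
	finished <- taskMsg{ID: "task-1"}
	time.Sleep(1500 * time.Millisecond)
	close(done)
}
```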
@@ -37,14 +37,24 @@ func TestHeartbeater(t *testing.T) {
 for _, tc := range tests {
     h.FlushDB(t, r)

-    state := base.NewServerState(tc.host, tc.pid, tc.concurrency, tc.queues, false)
+    status := base.NewServerStatus(base.StatusIdle)
     hb := newHeartbeater(heartbeaterParams{
         logger: testLogger,
         broker: rdbClient,
-        serverState: state,
-        interval:    tc.interval,
+        interval:       tc.interval,
+        concurrency:    tc.concurrency,
+        queues:         tc.queues,
+        strictPriority: false,
+        status:         status,
+        starting:       make(chan *base.TaskMessage),
+        finished:       make(chan *base.TaskMessage),
     })

+    // Change host and pid fields for testing purpose.
+    hb.host = tc.host
+    hb.pid = tc.pid
+
+    status.Set(base.StatusRunning)
     var wg sync.WaitGroup
     hb.start(&wg)

@@ -80,7 +90,7 @@ func TestHeartbeater(t *testing.T) {
     }

     // status change
-    state.SetStatus(base.StatusStopped)
+    status.Set(base.StatusStopped)

     // allow for heartbeater to write to redis
     time.Sleep(tc.interval * 2)
@@ -119,12 +129,16 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
     }()
     r := rdb.NewRDB(setup(t))
     testBroker := testbroker.NewTestBroker(r)
-    ss := base.NewServerState("localhost", 1234, 10, map[string]int{"default": 1}, false)
     hb := newHeartbeater(heartbeaterParams{
         logger: testLogger,
         broker: testBroker,
-        serverState: ss,
-        interval:    time.Second,
+        interval:       time.Second,
+        concurrency:    10,
+        queues:         map[string]int{"default": 1},
+        strictPriority: false,
+        status:         base.NewServerStatus(base.StatusRunning),
+        starting:       make(chan *base.TaskMessage),
+        finished:       make(chan *base.TaskMessage),
     })

     testBroker.Sleep()
@@ -57,7 +57,7 @@ var SortServerInfoOpt = cmp.Transformer("SortServerInfo", func(in []*base.Server
 var SortWorkerInfoOpt = cmp.Transformer("SortWorkerInfo", func(in []*base.WorkerInfo) []*base.WorkerInfo {
     out := append([]*base.WorkerInfo(nil), in...) // Copy input to avoid mutating it
     sort.Slice(out, func(i, j int) bool {
-        return out[i].ID.String() < out[j].ID.String()
+        return out[i].ID < out[j].ID
     })
     return out
 })
@@ -7,6 +7,7 @@ package base

 import (
     "context"
+    "encoding/json"
     "fmt"
     "strings"
     "sync"
@@ -34,6 +35,7 @@ const (
     RetryQueue      = "asynq:retry"       // ZSET
     DeadQueue       = "asynq:dead"        // ZSET
     InProgressQueue = "asynq:in_progress" // LIST
+    PausedQueues    = "asynq:paused"      // SET
     CancelChannel   = "asynq:cancel"      // PubSub channel
 )

@@ -105,28 +107,43 @@ type TaskMessage struct {
     UniqueKey string
 }

-// ServerState holds process level information.
-//
-// ServerStates are safe for concurrent use by multiple goroutines.
-type ServerState struct {
-    mu             sync.Mutex // guards all data fields
-    id             xid.ID
-    concurrency    int
-    queues         map[string]int
-    strictPriority bool
-    pid            int
-    host           string
-    status         ServerStatus
-    started        time.Time
-    workers        map[string]*workerStats
+// EncodeMessage marshals the given task message in JSON and returns an encoded string.
+func EncodeMessage(msg *TaskMessage) (string, error) {
+    b, err := json.Marshal(msg)
+    if err != nil {
+        return "", err
+    }
+    return string(b), nil
+}
+
+// DecodeMessage unmarshals the given encoded string and returns a decoded task message.
+func DecodeMessage(s string) (*TaskMessage, error) {
+    d := json.NewDecoder(strings.NewReader(s))
+    d.UseNumber()
+    var msg TaskMessage
+    if err := d.Decode(&msg); err != nil {
+        return nil, err
+    }
+    return &msg, nil
 }

 // ServerStatus represents status of a server.
-type ServerStatus int
+// ServerStatus methods are concurrency safe.
+type ServerStatus struct {
+    mu  sync.Mutex
+    val ServerStatusValue
+}
+
+// NewServerStatus returns a new status instance given an initial value.
+func NewServerStatus(v ServerStatusValue) *ServerStatus {
+    return &ServerStatus{val: v}
+}
+
+type ServerStatusValue int
+
 const (
     // StatusIdle indicates the server is in idle state.
-    StatusIdle ServerStatus = iota
+    StatusIdle ServerStatusValue = iota

     // StatusRunning indicates the servier is up and processing tasks.
     StatusRunning
@@ -145,117 +162,28 @@ var statuses = []string{
     "stopped",
 }

-func (s ServerStatus) String() string {
-    if StatusIdle <= s && s <= StatusStopped {
-        return statuses[s]
+func (s *ServerStatus) String() string {
+    s.mu.Lock()
+    defer s.mu.Unlock()
+    if StatusIdle <= s.val && s.val <= StatusStopped {
+        return statuses[s.val]
     }
     return "unknown status"
 }

-type workerStats struct {
-    msg     *TaskMessage
-    started time.Time
+// Get returns the status value.
+func (s *ServerStatus) Get() ServerStatusValue {
+    s.mu.Lock()
+    v := s.val
+    s.mu.Unlock()
+    return v
 }

-// NewServerState returns a new instance of ServerState.
-func NewServerState(host string, pid, concurrency int, queues map[string]int, strict bool) *ServerState {
-    return &ServerState{
-        host:           host,
-        pid:            pid,
-        id:             xid.New(),
-        concurrency:    concurrency,
-        queues:         cloneQueueConfig(queues),
-        strictPriority: strict,
-        status:         StatusIdle,
-        workers:        make(map[string]*workerStats),
-    }
-}
-
-// SetStatus updates the status of server.
-func (ss *ServerState) SetStatus(status ServerStatus) {
-    ss.mu.Lock()
-    defer ss.mu.Unlock()
-    ss.status = status
-}
-
-// Status returns the status of server.
-func (ss *ServerState) Status() ServerStatus {
-    ss.mu.Lock()
-    defer ss.mu.Unlock()
-    return ss.status
-}
-
-// SetStarted records when the process started processing.
-func (ss *ServerState) SetStarted(t time.Time) {
-    ss.mu.Lock()
-    defer ss.mu.Unlock()
-    ss.started = t
-}
-
-// AddWorkerStats records when a worker started and which task it's processing.
-func (ss *ServerState) AddWorkerStats(msg *TaskMessage, started time.Time) {
-    ss.mu.Lock()
-    defer ss.mu.Unlock()
-    ss.workers[msg.ID.String()] = &workerStats{msg, started}
-}
-
-// DeleteWorkerStats removes a worker's entry from the process state.
-func (ss *ServerState) DeleteWorkerStats(msg *TaskMessage) {
-    ss.mu.Lock()
-    defer ss.mu.Unlock()
-    delete(ss.workers, msg.ID.String())
-}
-
-// GetInfo returns current state of server as a ServerInfo.
-func (ss *ServerState) GetInfo() *ServerInfo {
-    ss.mu.Lock()
-    defer ss.mu.Unlock()
-    return &ServerInfo{
-        Host:              ss.host,
-        PID:               ss.pid,
-        ServerID:          ss.id.String(),
-        Concurrency:       ss.concurrency,
-        Queues:            cloneQueueConfig(ss.queues),
-        StrictPriority:    ss.strictPriority,
-        Status:            ss.status.String(),
-        Started:           ss.started,
-        ActiveWorkerCount: len(ss.workers),
-    }
-}
-
-// GetWorkers returns a list of currently running workers' info.
-func (ss *ServerState) GetWorkers() []*WorkerInfo {
-    ss.mu.Lock()
-    defer ss.mu.Unlock()
-    var res []*WorkerInfo
-    for _, w := range ss.workers {
-        res = append(res, &WorkerInfo{
-            Host:    ss.host,
-            PID:     ss.pid,
-            ID:      w.msg.ID,
-            Type:    w.msg.Type,
-            Queue:   w.msg.Queue,
-            Payload: clonePayload(w.msg.Payload),
-            Started: w.started,
-        })
-    }
-    return res
-}
-
-func cloneQueueConfig(qcfg map[string]int) map[string]int {
-    res := make(map[string]int)
-    for qname, n := range qcfg {
-        res[qname] = n
-    }
-    return res
-}
-
-func clonePayload(payload map[string]interface{}) map[string]interface{} {
-    res := make(map[string]interface{})
-    for k, v := range payload {
-        res[k] = v
-    }
-    return res
+// Set sets the status value.
+func (s *ServerStatus) Set(v ServerStatusValue) {
+    s.mu.Lock()
+    s.val = v
+    s.mu.Unlock()
 }

 // ServerInfo holds information about a running server.
@@ -275,7 +203,7 @@ type ServerInfo struct {
 type WorkerInfo struct {
     Host    string
     PID     int
-    ID      xid.ID
+    ID      string
     Type    string
     Queue   string
     Payload map[string]interface{}
@@ -343,10 +271,9 @@ type Broker interface {
     ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
     Retry(msg *TaskMessage, processAt time.Time, errMsg string) error
     Kill(msg *TaskMessage, errMsg string) error
-    RequeueAll() (int64, error)
-    CheckAndEnqueue(qnames ...string) error
-    WriteServerState(ss *ServerState, ttl time.Duration) error
-    ClearServerState(ss *ServerState) error
+    CheckAndEnqueue() error
+    WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
+    ClearServerState(host string, pid int, serverID string) error
     CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
     PublishCancelation(id string) error
     Close() error
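The new DecodeMessage above decodes through a json.Decoder with UseNumber() instead of plain json.Unmarshal; that is what closes the JSON number overflow issue (#166) cited in the changelog, because unmarshalling into interface{} otherwise converts every number to float64 and silently loses precision for large integer payload values. A standalone standard-library sketch of the difference (not code from this commit):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// An integer payload value larger than float64 can represent exactly.
	data := `{"user_id": 9007199254740993}`

	var lossy map[string]interface{}
	if err := json.Unmarshal([]byte(data), &lossy); err != nil {
		panic(err)
	}
	fmt.Println(lossy["user_id"]) // 9.007199254740992e+15 (float64, precision lost)

	d := json.NewDecoder(strings.NewReader(data))
	d.UseNumber()
	var exact map[string]interface{}
	if err := d.Decode(&exact); err != nil {
		panic(err)
	}
	fmt.Println(exact["user_id"]) // 9007199254740993 (json.Number, preserved)
}
```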
@@ -6,13 +6,12 @@ package base

 import (
     "context"
-    "math/rand"
+    "encoding/json"
     "sync"
     "testing"
     "time"

     "github.com/google/go-cmp/cmp"
-    "github.com/google/go-cmp/cmp/cmpopts"
     "github.com/rs/xid"
 )

@@ -108,69 +107,74 @@ func TestWorkersKey(t *testing.T) {
     }
 }

-// Test for server state being accessed by multiple goroutines.
-// Run with -race flag to check for data race.
-func TestServerStateConcurrentAccess(t *testing.T) {
-    ss := NewServerState("127.0.0.1", 1234, 10, map[string]int{"default": 1}, false)
-    var wg sync.WaitGroup
-    started := time.Now()
-    msgs := []*TaskMessage{
-        {ID: xid.New(), Type: "type1", Payload: map[string]interface{}{"user_id": 42}},
-        {ID: xid.New(), Type: "type2"},
-        {ID: xid.New(), Type: "type3"},
+func TestMessageEncoding(t *testing.T) {
+    id := xid.New()
+    tests := []struct {
+        in  *TaskMessage
+        out *TaskMessage
+    }{
+        {
+            in: &TaskMessage{
+                Type:    "task1",
+                Payload: map[string]interface{}{"a": 1, "b": "hello!", "c": true},
+                ID:      id,
+                Queue:   "default",
+                Retry:   10,
+                Retried: 0,
+                Timeout: "0",
+            },
+            out: &TaskMessage{
+                Type:    "task1",
+                Payload: map[string]interface{}{"a": json.Number("1"), "b": "hello!", "c": true},
+                ID:      id,
+                Queue:   "default",
+                Retry:   10,
+                Retried: 0,
+                Timeout: "0",
+            },
+        },
     }

-    // Simulate hearbeater calling SetStatus and SetStarted.
+    for _, tc := range tests {
+        encoded, err := EncodeMessage(tc.in)
+        if err != nil {
+            t.Errorf("EncodeMessage(msg) returned error: %v", err)
+            continue
+        }
+        decoded, err := DecodeMessage(encoded)
+        if err != nil {
+            t.Errorf("DecodeMessage(encoded) returned error: %v", err)
+            continue
+        }
+        if diff := cmp.Diff(tc.out, decoded); diff != "" {
+            t.Errorf("Decoded message == %+v, want %+v;(-want,+got)\n%s",
+                decoded, tc.out, diff)
+        }
+    }
+}
+
+// Test for status being accessed by multiple goroutines.
+// Run with -race flag to check for data race.
+func TestStatusConcurrentAccess(t *testing.T) {
+    status := NewServerStatus(StatusIdle)
+
+    var wg sync.WaitGroup
+
     wg.Add(1)
     go func() {
         defer wg.Done()
-        ss.SetStarted(started)
-        ss.SetStatus(StatusRunning)
-        if status := ss.Status(); status != StatusRunning {
-            t.Errorf("(*ServerState).Status() = %v, want %v", status, StatusRunning)
-        }
+        status.Get()
+        status.String()
     }()

-    // Simulate processor starting worker goroutines.
-    for _, msg := range msgs {
-        wg.Add(1)
-        ss.AddWorkerStats(msg, time.Now())
-        go func(msg *TaskMessage) {
-            defer wg.Done()
-            time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
-            ss.DeleteWorkerStats(msg)
-        }(msg)
-    }
-
-    // Simulate hearbeater calling Get and GetWorkers
     wg.Add(1)
     go func() {
-        wg.Done()
-        for i := 0; i < 5; i++ {
-            ss.GetInfo()
-            ss.GetWorkers()
-            time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
-        }
+        defer wg.Done()
+        status.Set(StatusStopped)
+        status.String()
     }()

     wg.Wait()

-    want := &ServerInfo{
-        Host:              "127.0.0.1",
-        PID:               1234,
-        Concurrency:       10,
-        Queues:            map[string]int{"default": 1},
-        StrictPriority:    false,
-        Status:            "running",
-        Started:           started,
-        ActiveWorkerCount: 0,
-    }
-
-    got := ss.GetInfo()
-    if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(ServerInfo{}, "ServerID")); diff != "" {
-        t.Errorf("(*ServerState).GetInfo() = %+v, want %+v; (-want,+got)\n%s",
-            got, want, diff)
-    }
 }

 // Test for cancelations being accessed by multiple goroutines.
@@ -7,6 +7,7 @@ package rdb
 import (
     "encoding/json"
     "fmt"
+    "sort"
     "strings"
     "time"

@@ -25,10 +26,24 @@ type Stats struct {
     Dead       int
     Processed  int
     Failed     int
-    Queues     map[string]int // map of queue name to number of tasks in the queue (e.g., "default": 100, "critical": 20)
+    Queues     []*Queue
     Timestamp  time.Time
 }

+// Queue represents a task queue.
+type Queue struct {
+    // Name of the queue (e.g. "default", "critical").
+    // Note: It doesn't include the prefix "asynq:queues:".
+    Name string
+
+    // Paused indicates whether the queue is paused.
+    // If true, tasks in the queue should not be processed.
+    Paused bool
+
+    // Size is the number of tasks in the queue.
+    Size int
+}
+
 // DailyStats holds aggregate data for a given day.
 type DailyStats struct {
     Processed int
@@ -143,8 +158,12 @@ func (r *RDB) CurrentStats() (*Stats, error) {
     if err != nil {
         return nil, err
     }
+    paused, err := r.client.SMembersMap(base.PausedQueues).Result()
+    if err != nil {
+        return nil, err
+    }
     stats := &Stats{
-        Queues:    make(map[string]int),
+        Queues:    make([]*Queue, 0),
         Timestamp: now,
     }
     for i := 0; i < len(data); i += 2 {
@@ -154,7 +173,14 @@ func (r *RDB) CurrentStats() (*Stats, error) {
         switch {
         case strings.HasPrefix(key, base.QueuePrefix):
             stats.Enqueued += val
-            stats.Queues[strings.TrimPrefix(key, base.QueuePrefix)] = val
+            q := Queue{
+                Name: strings.TrimPrefix(key, base.QueuePrefix),
+                Size: val,
+            }
+            if _, exist := paused[key]; exist {
+                q.Paused = true
+            }
+            stats.Queues = append(stats.Queues, &q)
         case key == base.InProgressQueue:
             stats.InProgress = val
         case key == base.ScheduledQueue:
@@ -169,6 +195,9 @@ func (r *RDB) CurrentStats() (*Stats, error) {
             stats.Failed = val
         }
     }
+    sort.Slice(stats.Queues, func(i, j int) bool {
+        return stats.Queues[i].Name < stats.Queues[j].Name
+    })
     return stats, nil
 }

@@ -830,3 +859,33 @@ func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
     }
     return workers, nil
 }
+
+// KEYS[1] -> asynq:paused
+// ARGV[1] -> asynq:queues:<qname> - queue to pause
+var pauseCmd = redis.NewScript(`
+local ismem = redis.call("SISMEMBER", KEYS[1], ARGV[1])
+if ismem == 1 then
+    return redis.error_reply("queue is already paused")
+end
+return redis.call("SADD", KEYS[1], ARGV[1])`)
+
+// Pause pauses processing of tasks from the given queue.
+func (r *RDB) Pause(qname string) error {
+    qkey := base.QueueKey(qname)
+    return pauseCmd.Run(r.client, []string{base.PausedQueues}, qkey).Err()
+}
+
+// KEYS[1] -> asynq:paused
+// ARGV[1] -> asynq:queues:<qname> - queue to unpause
+var unpauseCmd = redis.NewScript(`
+local ismem = redis.call("SISMEMBER", KEYS[1], ARGV[1])
+if ismem == 0 then
+    return redis.error_reply("queue is not paused")
+end
+return redis.call("SREM", KEYS[1], ARGV[1])`)
+
+// Unpause resumes processing of tasks from the given queue.
+func (r *RDB) Unpause(qname string) error {
+    qkey := base.QueueKey(qname)
+    return unpauseCmd.Run(r.client, []string{base.PausedQueues}, qkey).Err()
+}
@@ -38,6 +38,7 @@ func TestCurrentStats(t *testing.T) {
     processed int
     failed    int
     allQueues []interface{}
+    paused    []string
     want      *Stats
 }{
     {
@@ -55,6 +56,7 @@ func TestCurrentStats(t *testing.T) {
         processed: 120,
         failed:    2,
         allQueues: []interface{}{base.DefaultQueue, base.QueueKey("critical"), base.QueueKey("low")},
+        paused:    []string{},
         want: &Stats{
             Enqueued:   3,
             InProgress: 1,
@@ -64,7 +66,12 @@ func TestCurrentStats(t *testing.T) {
             Processed: 120,
             Failed:    2,
             Timestamp: now,
-            Queues:    map[string]int{base.DefaultQueueName: 1, "critical": 1, "low": 1},
+            // Queues should be sorted by name.
+            Queues: []*Queue{
+                {Name: "critical", Paused: false, Size: 1},
+                {Name: "default", Paused: false, Size: 1},
+                {Name: "low", Paused: false, Size: 1},
+            },
         },
     },
     {
@@ -82,6 +89,7 @@ func TestCurrentStats(t *testing.T) {
         processed: 90,
         failed:    10,
         allQueues: []interface{}{base.DefaultQueue},
+        paused:    []string{},
         want: &Stats{
             Enqueued:   0,
             InProgress: 0,
@@ -91,13 +99,52 @@ func TestCurrentStats(t *testing.T) {
             Processed: 90,
             Failed:    10,
             Timestamp: now,
-            Queues:    map[string]int{base.DefaultQueueName: 0},
+            Queues: []*Queue{
+                {Name: base.DefaultQueueName, Paused: false, Size: 0},
+            },
+        },
+    },
+    {
+        enqueued: map[string][]*base.TaskMessage{
+            base.DefaultQueueName: {m1},
+            "critical":            {m5},
+            "low":                 {m6},
+        },
+        inProgress: []*base.TaskMessage{m2},
+        scheduled: []h.ZSetEntry{
+            {Msg: m3, Score: float64(now.Add(time.Hour).Unix())},
+            {Msg: m4, Score: float64(now.Unix())}},
+        retry:     []h.ZSetEntry{},
+        dead:      []h.ZSetEntry{},
+        processed: 120,
+        failed:    2,
+        allQueues: []interface{}{base.DefaultQueue, base.QueueKey("critical"), base.QueueKey("low")},
+        paused:    []string{"critical", "low"},
+        want: &Stats{
+            Enqueued:   3,
+            InProgress: 1,
+            Scheduled:  2,
+            Retry:      0,
+            Dead:       0,
+            Processed:  120,
+            Failed:     2,
+            Timestamp:  now,
+            Queues: []*Queue{
+                {Name: "critical", Paused: true, Size: 1},
+                {Name: "default", Paused: false, Size: 1},
+                {Name: "low", Paused: true, Size: 1},
+            },
         },
     },
 }

 for _, tc := range tests {
     h.FlushDB(t, r.client) // clean up db before each test case
+    for _, qname := range tc.paused {
+        if err := r.Pause(qname); err != nil {
+            t.Fatal(err)
+        }
+    }
     for qname, msgs := range tc.enqueued {
         h.SeedEnqueuedQueue(t, r.client, msgs, qname)
     }
@@ -136,7 +183,7 @@ func TestCurrentStatsWithoutData(t *testing.T) {
     Processed: 0,
     Failed:    0,
     Timestamp: time.Now(),
-    Queues:    map[string]int{},
+    Queues:    make([]*Queue, 0),
 }

 got, err := r.CurrentStats()
@@ -658,12 +705,14 @@ func TestListRetry(t *testing.T) {
 func TestListRetryPagination(t *testing.T) {
     r := setup(t)
     // create 100 tasks with an increasing number of wait time.
+    now := time.Now()
+    var seed []h.ZSetEntry
     for i := 0; i < 100; i++ {
         msg := h.NewTaskMessage(fmt.Sprintf("task %d", i), nil)
-        if err := r.Retry(msg, time.Now().Add(time.Duration(i)*time.Second), "error"); err != nil {
-            t.Fatal(err)
-        }
+        processAt := now.Add(time.Duration(i) * time.Second)
+        seed = append(seed, h.ZSetEntry{Msg: msg, Score: float64(processAt.Unix())})
     }
+    h.SeedRetryQueue(t, r.client, seed)

     tests := []struct {
         desc string
@@ -2055,60 +2104,48 @@ func TestListServers(t *testing.T) {
     r := setup(t)

     started1 := time.Now().Add(-time.Hour)
-    ss1 := base.NewServerState("do.droplet1", 1234, 10, map[string]int{"default": 1}, false)
-    ss1.SetStarted(started1)
-    ss1.SetStatus(base.StatusRunning)
     info1 := &base.ServerInfo{
-        Concurrency:       10,
-        Queues:            map[string]int{"default": 1},
         Host:              "do.droplet1",
         PID:               1234,
+        ServerID:          "server123",
+        Concurrency:       10,
+        Queues:            map[string]int{"default": 1},
         Status:            "running",
         Started:           started1,
         ActiveWorkerCount: 0,
     }

     started2 := time.Now().Add(-2 * time.Hour)
-    ss2 := base.NewServerState("do.droplet2", 9876, 20, map[string]int{"email": 1}, false)
-    ss2.SetStarted(started2)
-    ss2.SetStatus(base.StatusStopped)
-    ss2.AddWorkerStats(h.NewTaskMessage("send_email", nil), time.Now())
     info2 := &base.ServerInfo{
-        Concurrency:       20,
-        Queues:            map[string]int{"email": 1},
         Host:              "do.droplet2",
         PID:               9876,
+        ServerID:          "server456",
+        Concurrency:       20,
+        Queues:            map[string]int{"email": 1},
         Status:            "stopped",
         Started:           started2,
         ActiveWorkerCount: 1,
     }

     tests := []struct {
-        serverStates []*base.ServerState
-        want         []*base.ServerInfo
+        data []*base.ServerInfo
     }{
         {
-            serverStates: []*base.ServerState{},
-            want:         []*base.ServerInfo{},
+            data: []*base.ServerInfo{},
         },
         {
-            serverStates: []*base.ServerState{ss1},
-            want:         []*base.ServerInfo{info1},
+            data: []*base.ServerInfo{info1},
         },
         {
-            serverStates: []*base.ServerState{ss1, ss2},
-            want:         []*base.ServerInfo{info1, info2},
+            data: []*base.ServerInfo{info1, info2},
         },
     }

-    ignoreOpt := cmpopts.IgnoreUnexported(base.ServerInfo{})
-    ignoreFieldOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID")
-
     for _, tc := range tests {
         h.FlushDB(t, r.client)

-        for _, ss := range tc.serverStates {
-            if err := r.WriteServerState(ss, 5*time.Second); err != nil {
+        for _, info := range tc.data {
+            if err := r.WriteServerState(info, []*base.WorkerInfo{}, 5*time.Second); err != nil {
                 t.Fatal(err)
             }
         }
@@ -2117,9 +2154,9 @@ func TestListServers(t *testing.T) {
         if err != nil {
             t.Errorf("r.ListServers returned an error: %v", err)
         }
-        if diff := cmp.Diff(tc.want, got, h.SortServerInfoOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
+        if diff := cmp.Diff(tc.data, got, h.SortServerInfoOpt); diff != "" {
             t.Errorf("r.ListServers returned %v, want %v; (-want,+got)\n%s",
-                got, tc.serverStates, diff)
+                got, tc.data, diff)
         }
     }
 }
@@ -2127,37 +2164,23 @@ func TestListServers(t *testing.T) {
 func TestListWorkers(t *testing.T) {
     r := setup(t)

-    const (
+    var (
         host = "127.0.0.1"
         pid  = 4567
+
+        m1 = h.NewTaskMessage("send_email", map[string]interface{}{"user_id": "abc123"})
+        m2 = h.NewTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/image/file"})
+        m3 = h.NewTaskMessage("reindex", map[string]interface{}{})
     )

-    m1 := h.NewTaskMessage("send_email", map[string]interface{}{"user_id": "abc123"})
-    m2 := h.NewTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/image/file"})
-    m3 := h.NewTaskMessage("reindex", map[string]interface{}{})
-    t1 := time.Now().Add(-time.Second)
-    t2 := time.Now().Add(-10 * time.Second)
-    t3 := time.Now().Add(-time.Minute)
-
-    type workerStats struct {
-        msg     *base.TaskMessage
-        started time.Time
-    }
-
     tests := []struct {
-        workers []*workerStats
-        want    []*base.WorkerInfo
+        data []*base.WorkerInfo
     }{
         {
-            workers: []*workerStats{
-                {m1, t1},
-                {m2, t2},
-                {m3, t3},
-            },
-            want: []*base.WorkerInfo{
-                {Host: host, PID: pid, ID: m1.ID, Type: m1.Type, Queue: m1.Queue, Payload: m1.Payload, Started: t1},
-                {Host: host, PID: pid, ID: m2.ID, Type: m2.Type, Queue: m2.Queue, Payload: m2.Payload, Started: t2},
-                {Host: host, PID: pid, ID: m3.ID, Type: m3.Type, Queue: m3.Queue, Payload: m3.Payload, Started: t3},
+            data: []*base.WorkerInfo{
+                {Host: host, PID: pid, ID: m1.ID.String(), Type: m1.Type, Queue: m1.Queue, Payload: m1.Payload, Started: time.Now().Add(-1 * time.Second)},
+                {Host: host, PID: pid, ID: m2.ID.String(), Type: m2.Type, Queue: m2.Queue, Payload: m2.Payload, Started: time.Now().Add(-5 * time.Second)},
+                {Host: host, PID: pid, ID: m3.ID.String(), Type: m3.Type, Queue: m3.Queue, Payload: m3.Payload, Started: time.Now().Add(-30 * time.Second)},
             },
         },
     }
@@ -2165,13 +2188,7 @@ func TestListWorkers(t *testing.T) {
     for _, tc := range tests {
         h.FlushDB(t, r.client)

-        ss := base.NewServerState(host, pid, 10, map[string]int{"default": 1}, false)
-
-        for _, w := range tc.workers {
-            ss.AddWorkerStats(w.msg, w.started)
-        }
-
-        err := r.WriteServerState(ss, time.Minute)
+        err := r.WriteServerState(&base.ServerInfo{}, tc.data, time.Minute)
         if err != nil {
             t.Errorf("could not write server state to redis: %v", err)
             continue
@@ -2183,8 +2200,165 @@ func TestListWorkers(t *testing.T) {
             continue
         }

-        if diff := cmp.Diff(tc.want, got, h.SortWorkerInfoOpt); diff != "" {
-            t.Errorf("(*RDB).ListWorkers() = %v, want = %v; (-want,+got)\n%s", got, tc.want, diff)
+        if diff := cmp.Diff(tc.data, got, h.SortWorkerInfoOpt); diff != "" {
+            t.Errorf("(*RDB).ListWorkers() = %v, want = %v; (-want,+got)\n%s", got, tc.data, diff)
+        }
+    }
+}
+
+func TestPause(t *testing.T) {
+    r := setup(t)
+
+    tests := []struct {
+        initial []string // initial keys in the paused set
+        qname   string   // name of the queue to pause
+        want    []string // expected keys in the paused set
+    }{
+        {[]string{}, "default", []string{"asynq:queues:default"}},
+        {[]string{"asynq:queues:default"}, "critical", []string{"asynq:queues:default", "asynq:queues:critical"}},
+    }
+
+    for _, tc := range tests {
+        h.FlushDB(t, r.client)
+
+        // Set up initial state.
+        for _, qkey := range tc.initial {
+            if err := r.client.SAdd(base.PausedQueues, qkey).Err(); err != nil {
+                t.Fatal(err)
+            }
+        }
+
+        err := r.Pause(tc.qname)
+        if err != nil {
+            t.Errorf("Pause(%q) returned error: %v", tc.qname, err)
+        }
+
+        got, err := r.client.SMembers(base.PausedQueues).Result()
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        if diff := cmp.Diff(tc.want, got, h.SortStringSliceOpt); diff != "" {
+            t.Errorf("%q has members %v, want %v; (-want,+got)\n%s",
+                base.PausedQueues, got, tc.want, diff)
+        }
+    }
+}
+
+func TestPauseError(t *testing.T) {
+    r := setup(t)
+
+    tests := []struct {
+        desc    string   // test case description
+        initial []string // initial keys in the paused set
+        qname   string   // name of the queue to pause
+        want    []string // expected keys in the paused set
+    }{
+        {"queue already paused", []string{"asynq:queues:default"}, "default", []string{"asynq:queues:default"}},
+    }
+
+    for _, tc := range tests {
+        h.FlushDB(t, r.client)
+
+        // Set up initial state.
+        for _, qkey := range tc.initial {
+            if err := r.client.SAdd(base.PausedQueues, qkey).Err(); err != nil {
+                t.Fatal(err)
+            }
+        }
+
+        err := r.Pause(tc.qname)
+        if err == nil {
+            t.Errorf("%s; Pause(%q) returned nil: want error", tc.desc, tc.qname)
+        }
+
+        got, err := r.client.SMembers(base.PausedQueues).Result()
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        if diff := cmp.Diff(tc.want, got, h.SortStringSliceOpt); diff != "" {
+            t.Errorf("%s; %q has members %v, want %v; (-want,+got)\n%s",
+                tc.desc, base.PausedQueues, got, tc.want, diff)
+        }
+    }
+}
+
+func TestUnpause(t *testing.T) {
+    r := setup(t)
+
+    tests := []struct {
+        initial []string // initial keys in the paused set
+        qname   string   // name of the queue to unpause
+        want    []string // expected keys in the paused set
+    }{
+        {[]string{"asynq:queues:default"}, "default", []string{}},
+        {[]string{"asynq:queues:default", "asynq:queues:low"}, "low", []string{"asynq:queues:default"}},
+    }
+
+    for _, tc := range tests {
+        h.FlushDB(t, r.client)
+
+        // Set up initial state.
+        for _, qkey := range tc.initial {
+            if err := r.client.SAdd(base.PausedQueues, qkey).Err(); err != nil {
+                t.Fatal(err)
+            }
+        }
+
+        err := r.Unpause(tc.qname)
+        if err != nil {
+            t.Errorf("Unpause(%q) returned error: %v", tc.qname, err)
+        }
+
+        got, err := r.client.SMembers(base.PausedQueues).Result()
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        if diff := cmp.Diff(tc.want, got, h.SortStringSliceOpt); diff != "" {
+            t.Errorf("%q has members %v, want %v; (-want,+got)\n%s",
+                base.PausedQueues, got, tc.want, diff)
+        }
+    }
+}
+
+func TestUnpauseError(t *testing.T) {
+    r := setup(t)
+
+    tests := []struct {
+        desc    string   // test case description
+        initial []string // initial keys in the paused set
+        qname   string   // name of the queue to unpause
+        want    []string // expected keys in the paused set
+    }{
+        {"set is empty", []string{}, "default", []string{}},
+        {"queue is not in the set", []string{"asynq:queues:default"}, "low", []string{"asynq:queues:default"}},
+    }
+
+    for _, tc := range tests {
+        h.FlushDB(t, r.client)
+
+        // Set up initial state.
+        for _, qkey := range tc.initial {
+            if err := r.client.SAdd(base.PausedQueues, qkey).Err(); err != nil {
+                t.Fatal(err)
+            }
+        }
+
+        err := r.Unpause(tc.qname)
+        if err == nil {
+            t.Errorf("%s; Unpause(%q) returned nil: want error", tc.desc, tc.qname)
+        }
+
+        got, err := r.client.SMembers(base.PausedQueues).Result()
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        if diff := cmp.Diff(tc.want, got, h.SortStringSliceOpt); diff != "" {
+            t.Errorf("%s; %q has members %v, want %v; (-want,+got)\n%s",
+                tc.desc, base.PausedQueues, got, tc.want, diff)
         }
     }
 }
@@ -54,12 +54,12 @@ return 1`)

 // Enqueue inserts the given task to the tail of the queue.
 func (r *RDB) Enqueue(msg *base.TaskMessage) error {
-    bytes, err := json.Marshal(msg)
+    encoded, err := base.EncodeMessage(msg)
     if err != nil {
         return err
     }
     key := base.QueueKey(msg.Queue)
-    return enqueueCmd.Run(r.client, []string{key, base.AllQueues}, bytes).Err()
+    return enqueueCmd.Run(r.client, []string{key, base.AllQueues}, encoded).Err()
 }

 // KEYS[1] -> unique key in the form <type>:<payload>:<qname>
@@ -81,14 +81,14 @@ return 1
 // EnqueueUnique inserts the given task if the task's uniqueness lock can be acquired.
 // It returns ErrDuplicateTask if the lock cannot be acquired.
 func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
-    bytes, err := json.Marshal(msg)
+    encoded, err := base.EncodeMessage(msg)
     if err != nil {
         return err
     }
     key := base.QueueKey(msg.Queue)
     res, err := enqueueUniqueCmd.Run(r.client,
         []string{msg.UniqueKey, key, base.AllQueues},
-        msg.ID.String(), int(ttl.Seconds()), bytes).Result()
+        msg.ID.String(), int(ttl.Seconds()), encoded).Result()
     if err != nil {
         return err
     }
@@ -103,56 +103,43 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
 }

 // Dequeue queries given queues in order and pops a task message if there is one and returns it.
+// Dequeue skips a queue if the queue is paused.
 // If all queues are empty, ErrNoProcessableTask error is returned.
 func (r *RDB) Dequeue(qnames ...string) (*base.TaskMessage, error) {
-    var data string
-    var err error
-    if len(qnames) == 1 {
-        data, err = r.dequeueSingle(base.QueueKey(qnames[0]))
-    } else {
-        var keys []string
-        for _, q := range qnames {
-            keys = append(keys, base.QueueKey(q))
-        }
-        data, err = r.dequeue(keys...)
+    var qkeys []interface{}
+    for _, q := range qnames {
+        qkeys = append(qkeys, base.QueueKey(q))
     }
+    data, err := r.dequeue(qkeys...)
     if err == redis.Nil {
         return nil, ErrNoProcessableTask
     }
     if err != nil {
         return nil, err
     }
-    var msg base.TaskMessage
-    err = json.Unmarshal([]byte(data), &msg)
-    if err != nil {
-        return nil, err
-    }
-    return &msg, nil
-}
-
-func (r *RDB) dequeueSingle(queue string) (data string, err error) {
-    // timeout needed to avoid blocking forever
-    return r.client.BRPopLPush(queue, base.InProgressQueue, time.Second).Result()
+    return base.DecodeMessage(data)
 }

 // KEYS[1] -> asynq:in_progress
+// KEYS[2] -> asynq:paused
 // ARGV   -> List of queues to query in order
+//
+// dequeueCmd checks whether a queue is paused first, before
+// calling RPOPLPUSH to pop a task from the queue.
 var dequeueCmd = redis.NewScript(`
-local res
 for _, qkey in ipairs(ARGV) do
-    res = redis.call("RPOPLPUSH", qkey, KEYS[1])
-    if res then
-        return res
+    if redis.call("SISMEMBER", KEYS[2], qkey) == 0 then
+        local res = redis.call("RPOPLPUSH", qkey, KEYS[1])
+        if res then
+            return res
+        end
     end
 end
-return res`)
+return nil`)

-func (r *RDB) dequeue(queues ...string) (data string, err error) {
+func (r *RDB) dequeue(qkeys ...interface{}) (data string, err error) {
     var args []interface{}
|
res, err := dequeueCmd.Run(r.client,
|
||||||
for _, qkey := range queues {
|
[]string{base.InProgressQueue, base.PausedQueues}, qkeys...).Result()
|
||||||
args = append(args, qkey)
|
|
||||||
}
|
|
||||||
res, err := dequeueCmd.Run(r.client, []string{base.InProgressQueue}, args...).Result()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@@ -167,7 +154,10 @@ func (r *RDB) dequeue(queues ...string) (data string, err error) {
|
|||||||
// ARGV[3] -> task ID
|
// ARGV[3] -> task ID
|
||||||
// Note: LREM count ZERO means "remove all elements equal to val"
|
// Note: LREM count ZERO means "remove all elements equal to val"
|
||||||
var doneCmd = redis.NewScript(`
|
var doneCmd = redis.NewScript(`
|
||||||
redis.call("LREM", KEYS[1], 0, ARGV[1])
|
local x = redis.call("LREM", KEYS[1], 0, ARGV[1])
|
||||||
|
if x == 0 then
|
||||||
|
return redis.error_reply("NOT FOUND")
|
||||||
|
end
|
||||||
local n = redis.call("INCR", KEYS[2])
|
local n = redis.call("INCR", KEYS[2])
|
||||||
if tonumber(n) == 1 then
|
if tonumber(n) == 1 then
|
||||||
redis.call("EXPIREAT", KEYS[2], ARGV[2])
|
redis.call("EXPIREAT", KEYS[2], ARGV[2])
|
||||||
@@ -181,7 +171,7 @@ return redis.status_reply("OK")
|
|||||||
// Done removes the task from in-progress queue to mark the task as done.
|
// Done removes the task from in-progress queue to mark the task as done.
|
||||||
// It removes a uniqueness lock acquired by the task, if any.
|
// It removes a uniqueness lock acquired by the task, if any.
|
||||||
func (r *RDB) Done(msg *base.TaskMessage) error {
|
func (r *RDB) Done(msg *base.TaskMessage) error {
|
||||||
bytes, err := json.Marshal(msg)
|
encoded, err := base.EncodeMessage(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -190,7 +180,7 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
|
|||||||
expireAt := now.Add(statsTTL)
|
expireAt := now.Add(statsTTL)
|
||||||
return doneCmd.Run(r.client,
|
return doneCmd.Run(r.client,
|
||||||
[]string{base.InProgressQueue, processedKey, msg.UniqueKey},
|
[]string{base.InProgressQueue, processedKey, msg.UniqueKey},
|
||||||
bytes, expireAt.Unix(), msg.ID.String()).Err()
|
encoded, expireAt.Unix(), msg.ID.String()).Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> asynq:in_progress
|
// KEYS[1] -> asynq:in_progress
|
||||||
@@ -204,13 +194,13 @@ return redis.status_reply("OK")`)
|
|||||||
|
|
||||||
// Requeue moves the task from in-progress queue to the specified queue.
|
// Requeue moves the task from in-progress queue to the specified queue.
|
||||||
func (r *RDB) Requeue(msg *base.TaskMessage) error {
|
func (r *RDB) Requeue(msg *base.TaskMessage) error {
|
||||||
bytes, err := json.Marshal(msg)
|
encoded, err := base.EncodeMessage(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return requeueCmd.Run(r.client,
|
return requeueCmd.Run(r.client,
|
||||||
[]string{base.InProgressQueue, base.QueueKey(msg.Queue)},
|
[]string{base.InProgressQueue, base.QueueKey(msg.Queue)},
|
||||||
string(bytes)).Err()
|
encoded).Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> asynq:scheduled
|
// KEYS[1] -> asynq:scheduled
|
||||||
@@ -226,7 +216,7 @@ return 1
|
|||||||
|
|
||||||
// Schedule adds the task to the backlog queue to be processed in the future.
|
// Schedule adds the task to the backlog queue to be processed in the future.
|
||||||
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
|
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
|
||||||
bytes, err := json.Marshal(msg)
|
encoded, err := base.EncodeMessage(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -234,7 +224,7 @@ func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
|
|||||||
score := float64(processAt.Unix())
|
score := float64(processAt.Unix())
|
||||||
return scheduleCmd.Run(r.client,
|
return scheduleCmd.Run(r.client,
|
||||||
[]string{base.ScheduledQueue, base.AllQueues},
|
[]string{base.ScheduledQueue, base.AllQueues},
|
||||||
score, bytes, qkey).Err()
|
score, encoded, qkey).Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> unique key in the format <type>:<payload>:<qname>
|
// KEYS[1] -> unique key in the format <type>:<payload>:<qname>
|
||||||
@@ -258,7 +248,7 @@ return 1
|
|||||||
// ScheduleUnique adds the task to the backlog queue to be processed in the future if the uniqueness lock can be acquired.
|
// ScheduleUnique adds the task to the backlog queue to be processed in the future if the uniqueness lock can be acquired.
|
||||||
// It returns ErrDuplicateTask if the lock cannot be acquired.
|
// It returns ErrDuplicateTask if the lock cannot be acquired.
|
||||||
func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
|
func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
|
||||||
bytes, err := json.Marshal(msg)
|
encoded, err := base.EncodeMessage(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -266,7 +256,7 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
|
|||||||
score := float64(processAt.Unix())
|
score := float64(processAt.Unix())
|
||||||
res, err := scheduleUniqueCmd.Run(r.client,
|
res, err := scheduleUniqueCmd.Run(r.client,
|
||||||
[]string{msg.UniqueKey, base.ScheduledQueue, base.AllQueues},
|
[]string{msg.UniqueKey, base.ScheduledQueue, base.AllQueues},
|
||||||
msg.ID.String(), int(ttl.Seconds()), score, bytes, qkey).Result()
|
msg.ID.String(), int(ttl.Seconds()), score, encoded, qkey).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -289,7 +279,10 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
|
|||||||
// ARGV[3] -> retry_at UNIX timestamp
|
// ARGV[3] -> retry_at UNIX timestamp
|
||||||
// ARGV[4] -> stats expiration timestamp
|
// ARGV[4] -> stats expiration timestamp
|
||||||
var retryCmd = redis.NewScript(`
|
var retryCmd = redis.NewScript(`
|
||||||
redis.call("LREM", KEYS[1], 0, ARGV[1])
|
local x = redis.call("LREM", KEYS[1], 0, ARGV[1])
|
||||||
|
if x == 0 then
|
||||||
|
return redis.error_reply("NOT FOUND")
|
||||||
|
end
|
||||||
redis.call("ZADD", KEYS[2], ARGV[3], ARGV[2])
|
redis.call("ZADD", KEYS[2], ARGV[3], ARGV[2])
|
||||||
local n = redis.call("INCR", KEYS[3])
|
local n = redis.call("INCR", KEYS[3])
|
||||||
if tonumber(n) == 1 then
|
if tonumber(n) == 1 then
|
||||||
@@ -304,14 +297,14 @@ return redis.status_reply("OK")`)
|
|||||||
// Retry moves the task from in-progress to retry queue, incrementing retry count
|
// Retry moves the task from in-progress to retry queue, incrementing retry count
|
||||||
// and assigning error message to the task message.
|
// and assigning error message to the task message.
|
||||||
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
|
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
|
||||||
bytesToRemove, err := json.Marshal(msg)
|
msgToRemove, err := base.EncodeMessage(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
modified := *msg
|
modified := *msg
|
||||||
modified.Retried++
|
modified.Retried++
|
||||||
modified.ErrorMsg = errMsg
|
modified.ErrorMsg = errMsg
|
||||||
bytesToAdd, err := json.Marshal(&modified)
|
msgToAdd, err := base.EncodeMessage(&modified)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -321,7 +314,7 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) e
|
|||||||
expireAt := now.Add(statsTTL)
|
expireAt := now.Add(statsTTL)
|
||||||
return retryCmd.Run(r.client,
|
return retryCmd.Run(r.client,
|
||||||
[]string{base.InProgressQueue, base.RetryQueue, processedKey, failureKey},
|
[]string{base.InProgressQueue, base.RetryQueue, processedKey, failureKey},
|
||||||
string(bytesToRemove), string(bytesToAdd), processAt.Unix(), expireAt.Unix()).Err()
|
msgToRemove, msgToAdd, processAt.Unix(), expireAt.Unix()).Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -340,7 +333,10 @@ const (
|
|||||||
// ARGV[5] -> max number of tasks in dead queue (e.g., 100)
|
// ARGV[5] -> max number of tasks in dead queue (e.g., 100)
|
||||||
// ARGV[6] -> stats expiration timestamp
|
// ARGV[6] -> stats expiration timestamp
|
||||||
var killCmd = redis.NewScript(`
|
var killCmd = redis.NewScript(`
|
||||||
redis.call("LREM", KEYS[1], 0, ARGV[1])
|
local x = redis.call("LREM", KEYS[1], 0, ARGV[1])
|
||||||
|
if x == 0 then
|
||||||
|
return redis.error_reply("NOT FOUND")
|
||||||
|
end
|
||||||
redis.call("ZADD", KEYS[2], ARGV[3], ARGV[2])
|
redis.call("ZADD", KEYS[2], ARGV[3], ARGV[2])
|
||||||
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[4])
|
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[4])
|
||||||
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[5])
|
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[5])
|
||||||
@@ -358,13 +354,13 @@ return redis.status_reply("OK")`)
|
|||||||
// the error message to the task.
|
// the error message to the task.
|
||||||
// It also trims the set by timestamp and set size.
|
// It also trims the set by timestamp and set size.
|
||||||
func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
|
func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
|
||||||
bytesToRemove, err := json.Marshal(msg)
|
msgToRemove, err := base.EncodeMessage(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
modified := *msg
|
modified := *msg
|
||||||
modified.ErrorMsg = errMsg
|
modified.ErrorMsg = errMsg
|
||||||
bytesToAdd, err := json.Marshal(&modified)
|
msgToAdd, err := base.EncodeMessage(&modified)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -375,50 +371,20 @@ func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
|
|||||||
expireAt := now.Add(statsTTL)
|
expireAt := now.Add(statsTTL)
|
||||||
return killCmd.Run(r.client,
|
return killCmd.Run(r.client,
|
||||||
[]string{base.InProgressQueue, base.DeadQueue, processedKey, failureKey},
|
[]string{base.InProgressQueue, base.DeadQueue, processedKey, failureKey},
|
||||||
string(bytesToRemove), string(bytesToAdd), now.Unix(), limit, maxDeadTasks, expireAt.Unix()).Err()
|
msgToRemove, msgToAdd, now.Unix(), limit, maxDeadTasks, expireAt.Unix()).Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> asynq:in_progress
|
// CheckAndEnqueue checks for all scheduled/retry tasks and enqueues any tasks that
|
||||||
// ARGV[1] -> queue prefix
|
// are ready to be processed.
|
||||||
var requeueAllCmd = redis.NewScript(`
|
func (r *RDB) CheckAndEnqueue() (err error) {
|
||||||
local msgs = redis.call("LRANGE", KEYS[1], 0, -1)
|
|
||||||
for _, msg in ipairs(msgs) do
|
|
||||||
local decoded = cjson.decode(msg)
|
|
||||||
local qkey = ARGV[1] .. decoded["Queue"]
|
|
||||||
redis.call("RPUSH", qkey, msg)
|
|
||||||
redis.call("LREM", KEYS[1], 0, msg)
|
|
||||||
end
|
|
||||||
return table.getn(msgs)`)
|
|
||||||
|
|
||||||
// RequeueAll moves all tasks from in-progress list to the queue
|
|
||||||
// and reports the number of tasks restored.
|
|
||||||
func (r *RDB) RequeueAll() (int64, error) {
|
|
||||||
res, err := requeueAllCmd.Run(r.client, []string{base.InProgressQueue}, base.QueuePrefix).Result()
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, ok := res.(int64)
|
|
||||||
if !ok {
|
|
||||||
return 0, fmt.Errorf("could not cast %v to int64", res)
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckAndEnqueue checks for all scheduled tasks and enqueues any tasks that
|
|
||||||
// have to be processed.
|
|
||||||
//
|
|
||||||
// qnames specifies to which queues to send tasks.
|
|
||||||
func (r *RDB) CheckAndEnqueue(qnames ...string) error {
|
|
||||||
delayed := []string{base.ScheduledQueue, base.RetryQueue}
|
delayed := []string{base.ScheduledQueue, base.RetryQueue}
|
||||||
for _, zset := range delayed {
|
for _, zset := range delayed {
|
||||||
var err error
|
n := 1
|
||||||
if len(qnames) == 1 {
|
for n != 0 {
|
||||||
err = r.forwardSingle(zset, base.QueueKey(qnames[0]))
|
n, err = r.forward(zset)
|
||||||
} else {
|
if err != nil {
|
||||||
err = r.forward(zset)
|
return err
|
||||||
}
|
}
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -427,53 +393,40 @@ func (r *RDB) CheckAndEnqueue(qnames ...string) error {
|
|||||||
// KEYS[1] -> source queue (e.g. scheduled or retry queue)
|
// KEYS[1] -> source queue (e.g. scheduled or retry queue)
|
||||||
// ARGV[1] -> current unix time
|
// ARGV[1] -> current unix time
|
||||||
// ARGV[2] -> queue prefix
|
// ARGV[2] -> queue prefix
|
||||||
|
// Note: Script moves tasks up to 100 at a time to keep the runtime of script short.
|
||||||
var forwardCmd = redis.NewScript(`
|
var forwardCmd = redis.NewScript(`
|
||||||
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1])
|
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, 100)
|
||||||
for _, msg in ipairs(msgs) do
|
for _, msg in ipairs(msgs) do
|
||||||
local decoded = cjson.decode(msg)
|
local decoded = cjson.decode(msg)
|
||||||
local qkey = ARGV[2] .. decoded["Queue"]
|
local qkey = ARGV[2] .. decoded["Queue"]
|
||||||
redis.call("LPUSH", qkey, msg)
|
redis.call("LPUSH", qkey, msg)
|
||||||
redis.call("ZREM", KEYS[1], msg)
|
redis.call("ZREM", KEYS[1], msg)
|
||||||
end
|
end
|
||||||
return msgs`)
|
return table.getn(msgs)`)
|
||||||
|
|
||||||
// forward moves all tasks with a score less than the current unix time
|
// forward moves tasks with a score less than the current unix time
|
||||||
// from the src zset.
|
// from the src zset. It returns the number of tasks moved.
|
||||||
func (r *RDB) forward(src string) error {
|
func (r *RDB) forward(src string) (int, error) {
|
||||||
now := float64(time.Now().Unix())
|
now := float64(time.Now().Unix())
|
||||||
return forwardCmd.Run(r.client,
|
res, err := forwardCmd.Run(r.client,
|
||||||
[]string{src}, now, base.QueuePrefix).Err()
|
[]string{src}, now, base.QueuePrefix).Result()
|
||||||
}
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
// KEYS[1] -> source queue (e.g. scheduled or retry queue)
|
}
|
||||||
// KEYS[2] -> destination queue
|
return cast.ToInt(res), nil
|
||||||
var forwardSingleCmd = redis.NewScript(`
|
|
||||||
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1])
|
|
||||||
for _, msg in ipairs(msgs) do
|
|
||||||
redis.call("LPUSH", KEYS[2], msg)
|
|
||||||
redis.call("ZREM", KEYS[1], msg)
|
|
||||||
end
|
|
||||||
return msgs`)
|
|
||||||
|
|
||||||
// forwardSingle moves all tasks with a score less than the current unix time
|
|
||||||
// from the src zset to dst list.
|
|
||||||
func (r *RDB) forwardSingle(src, dst string) error {
|
|
||||||
now := float64(time.Now().Unix())
|
|
||||||
return forwardSingleCmd.Run(r.client,
|
|
||||||
[]string{src, dst}, now).Err()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> asynq:servers:<host:pid:sid>
|
// KEYS[1] -> asynq:servers:<host:pid:sid>
|
||||||
// KEYS[2] -> asynq:servers
|
// KEYS[2] -> asynq:servers
|
||||||
// KEYS[3] -> asynq:workers<host:pid:sid>
|
// KEYS[3] -> asynq:workers<host:pid:sid>
|
||||||
// keys[4] -> asynq:workers
|
// KEYS[4] -> asynq:workers
|
||||||
// ARGV[1] -> expiration time
|
// ARGV[1] -> expiration time
|
||||||
// ARGV[2] -> TTL in seconds
|
// ARGV[2] -> TTL in seconds
|
||||||
// ARGV[3] -> process info
|
// ARGV[3] -> server info
|
||||||
// ARGV[4:] -> alternate key-value pair of (worker id, worker data)
|
// ARGV[4:] -> alternate key-value pair of (worker id, worker data)
|
||||||
// Note: Add key to ZSET with expiration time as score.
|
// Note: Add key to ZSET with expiration time as score.
|
||||||
// ref: https://github.com/antirez/redis/issues/135#issuecomment-2361996
|
// ref: https://github.com/antirez/redis/issues/135#issuecomment-2361996
|
||||||
var writeProcessInfoCmd = redis.NewScript(`
|
var writeServerStateCmd = redis.NewScript(`
|
||||||
redis.call("SETEX", KEYS[1], ARGV[2], ARGV[3])
|
redis.call("SETEX", KEYS[1], ARGV[2], ARGV[3])
|
||||||
redis.call("ZADD", KEYS[2], ARGV[1], KEYS[1])
|
redis.call("ZADD", KEYS[2], ARGV[1], KEYS[1])
|
||||||
redis.call("DEL", KEYS[3])
|
redis.call("DEL", KEYS[3])
|
||||||
@@ -484,27 +437,24 @@ redis.call("EXPIRE", KEYS[3], ARGV[2])
|
|||||||
redis.call("ZADD", KEYS[4], ARGV[1], KEYS[3])
|
redis.call("ZADD", KEYS[4], ARGV[1], KEYS[3])
|
||||||
return redis.status_reply("OK")`)
|
return redis.status_reply("OK")`)
|
||||||
|
|
||||||
// WriteServerState writes server state data to redis with expiration set to the value ttl.
|
// WriteServerState writes server state data to redis with expiration set to the value ttl.
|
||||||
func (r *RDB) WriteServerState(ss *base.ServerState, ttl time.Duration) error {
|
func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
|
||||||
info := ss.GetInfo()
|
|
||||||
bytes, err := json.Marshal(info)
|
bytes, err := json.Marshal(info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
var args []interface{} // args to the lua script
|
|
||||||
exp := time.Now().Add(ttl).UTC()
|
exp := time.Now().Add(ttl).UTC()
|
||||||
workers := ss.GetWorkers()
|
args := []interface{}{float64(exp.Unix()), ttl.Seconds(), bytes} // args to the lua script
|
||||||
args = append(args, float64(exp.Unix()), ttl.Seconds(), bytes)
|
|
||||||
for _, w := range workers {
|
for _, w := range workers {
|
||||||
bytes, err := json.Marshal(w)
|
bytes, err := json.Marshal(w)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue // skip bad data
|
continue // skip bad data
|
||||||
}
|
}
|
||||||
args = append(args, w.ID.String(), bytes)
|
args = append(args, w.ID, bytes)
|
||||||
}
|
}
|
||||||
skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
|
skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
|
||||||
wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
|
wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
|
||||||
return writeProcessInfoCmd.Run(r.client,
|
return writeServerStateCmd.Run(r.client,
|
||||||
[]string{skey, base.AllServers, wkey, base.AllWorkers},
|
[]string{skey, base.AllServers, wkey, base.AllWorkers},
|
||||||
args...).Err()
|
args...).Err()
|
||||||
}
|
}
|
||||||
@@ -521,11 +471,9 @@ redis.call("DEL", KEYS[4])
|
|||||||
return redis.status_reply("OK")`)
|
return redis.status_reply("OK")`)
|
||||||
|
|
||||||
// ClearServerState deletes server state data from redis.
|
// ClearServerState deletes server state data from redis.
|
||||||
func (r *RDB) ClearServerState(ss *base.ServerState) error {
|
func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
|
||||||
info := ss.GetInfo()
|
skey := base.ServerInfoKey(host, pid, serverID)
|
||||||
host, pid, id := info.Host, info.PID, info.ServerID
|
wkey := base.WorkersKey(host, pid, serverID)
|
||||||
skey := base.ServerInfoKey(host, pid, id)
|
|
||||||
wkey := base.WorkersKey(host, pid, id)
|
|
||||||
return clearProcessInfoCmd.Run(r.client,
|
return clearProcessInfoCmd.Run(r.client,
|
||||||
[]string{base.AllServers, skey, base.AllWorkers, wkey}).Err()
|
[]string{base.AllServers, skey, base.AllWorkers, wkey}).Err()
|
||||||
}
|
}
|
||||||
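For orientation, here is a hedged sketch of how the new paused-queue check behaves from a caller's point of view; it mirrors TestDequeueIgnoresPausedQueues in the test diff that follows rather than adding anything new, and it assumes an already-constructed *RDB value named r.

	// Sketch only: with "default" paused, Dequeue falls through to "critical".
	if err := r.Pause("default"); err != nil {
		log.Fatal(err)
	}
	msg, err := r.Dequeue("default", "critical")
	// msg comes from "critical"; tasks already sitting in "default" stay untouched
	// until Unpause("default") is called. If every queried queue is paused or empty,
	// Dequeue returns ErrNoProcessableTask.
	_ = msg
	_ = err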
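The CheckAndEnqueue change above also alters how delayed tasks are drained: forwardCmd now moves at most 100 tasks per script invocation and returns the number moved, and CheckAndEnqueue keeps calling it until that count reaches zero. A minimal sketch of the drain pattern, with forwardBatch as a hypothetical stand-in for the unexported (*RDB).forward:

	// Sketch of the drain loop; forwardBatch is a hypothetical stand-in that
	// moves up to 100 ready tasks and reports how many were moved.
	moved := 1
	for moved != 0 {
		var err error
		moved, err = forwardBatch()
		if err != nil {
			return err
		}
	}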
@@ -227,6 +227,97 @@ func TestDequeue(t *testing.T) {
 	}
 }
 
+func TestDequeueIgnoresPausedQueues(t *testing.T) {
+	r := setup(t)
+	t1 := h.NewTaskMessage("send_email", map[string]interface{}{"subject": "hello!"})
+	t2 := h.NewTaskMessage("export_csv", nil)
+
+	tests := []struct {
+		paused         []string // list of paused queues
+		enqueued       map[string][]*base.TaskMessage
+		args           []string // list of queues to query
+		want           *base.TaskMessage
+		err            error
+		wantEnqueued   map[string][]*base.TaskMessage
+		wantInProgress []*base.TaskMessage
+	}{
+		{
+			paused: []string{"default"},
+			enqueued: map[string][]*base.TaskMessage{
+				"default":  {t1},
+				"critical": {t2},
+			},
+			args: []string{"default", "critical"},
+			want: t2,
+			err:  nil,
+			wantEnqueued: map[string][]*base.TaskMessage{
+				"default":  {t1},
+				"critical": {},
+			},
+			wantInProgress: []*base.TaskMessage{t2},
+		},
+		{
+			paused: []string{"default"},
+			enqueued: map[string][]*base.TaskMessage{
+				"default": {t1},
+			},
+			args: []string{"default"},
+			want: nil,
+			err:  ErrNoProcessableTask,
+			wantEnqueued: map[string][]*base.TaskMessage{
+				"default": {t1},
+			},
+			wantInProgress: []*base.TaskMessage{},
+		},
+		{
+			paused: []string{"critical", "default"},
+			enqueued: map[string][]*base.TaskMessage{
+				"default":  {t1},
+				"critical": {t2},
+			},
+			args: []string{"default", "critical"},
+			want: nil,
+			err:  ErrNoProcessableTask,
+			wantEnqueued: map[string][]*base.TaskMessage{
+				"default":  {t1},
+				"critical": {t2},
+			},
+			wantInProgress: []*base.TaskMessage{},
+		},
+	}
+
+	for _, tc := range tests {
+		h.FlushDB(t, r.client) // clean up db before each test case
+		for _, qname := range tc.paused {
+			if err := r.Pause(qname); err != nil {
+				t.Fatal(err)
+			}
+		}
+		for queue, msgs := range tc.enqueued {
+			h.SeedEnqueuedQueue(t, r.client, msgs, queue)
+		}
+
+		got, err := r.Dequeue(tc.args...)
+		if !cmp.Equal(got, tc.want) || err != tc.err {
+			t.Errorf("Dequeue(%v) = %v, %v; want %v, %v",
+				tc.args, got, err, tc.want, tc.err)
+			continue
+		}
+
+		for queue, want := range tc.wantEnqueued {
+			gotEnqueued := h.GetEnqueuedMessages(t, r.client, queue)
+			if diff := cmp.Diff(want, gotEnqueued, h.SortMsgOpt); diff != "" {
+				t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.QueueKey(queue), diff)
+			}
+		}
+
+		gotInProgress := h.GetInProgressMessages(t, r.client)
+		if diff := cmp.Diff(tc.wantInProgress, gotInProgress, h.SortMsgOpt); diff != "" {
+			t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.InProgressQueue, diff)
+		}
+	}
+}
+
 func TestDone(t *testing.T) {
 	r := setup(t)
 	t1 := h.NewTaskMessage("send_email", nil)
@@ -662,98 +753,6 @@ func TestKill(t *testing.T) {
 	}
 }
 
-func TestRequeueAll(t *testing.T) {
-	r := setup(t)
-	t1 := h.NewTaskMessage("send_email", nil)
-	t2 := h.NewTaskMessage("export_csv", nil)
-	t3 := h.NewTaskMessage("sync_stuff", nil)
-	t4 := h.NewTaskMessageWithQueue("important", nil, "critical")
-	t5 := h.NewTaskMessageWithQueue("minor", nil, "low")
-
-	tests := []struct {
-		inProgress     []*base.TaskMessage
-		enqueued       map[string][]*base.TaskMessage
-		want           int64
-		wantInProgress []*base.TaskMessage
-		wantEnqueued   map[string][]*base.TaskMessage
-	}{
-		{
-			inProgress: []*base.TaskMessage{t1, t2, t3},
-			enqueued: map[string][]*base.TaskMessage{
-				base.DefaultQueueName: {},
-			},
-			want:           3,
-			wantInProgress: []*base.TaskMessage{},
-			wantEnqueued: map[string][]*base.TaskMessage{
-				base.DefaultQueueName: {t1, t2, t3},
-			},
-		},
-		{
-			inProgress: []*base.TaskMessage{},
-			enqueued: map[string][]*base.TaskMessage{
-				base.DefaultQueueName: {t1, t2, t3},
-			},
-			want:           0,
-			wantInProgress: []*base.TaskMessage{},
-			wantEnqueued: map[string][]*base.TaskMessage{
-				base.DefaultQueueName: {t1, t2, t3},
-			},
-		},
-		{
-			inProgress: []*base.TaskMessage{t2, t3},
-			enqueued: map[string][]*base.TaskMessage{
-				base.DefaultQueueName: {t1},
-			},
-			want:           2,
-			wantInProgress: []*base.TaskMessage{},
-			wantEnqueued: map[string][]*base.TaskMessage{
-				base.DefaultQueueName: {t1, t2, t3},
-			},
-		},
-		{
-			inProgress: []*base.TaskMessage{t2, t3, t4, t5},
-			enqueued: map[string][]*base.TaskMessage{
-				base.DefaultQueueName: {t1},
-				"critical":            {},
-				"low":                 {},
-			},
-			want:           4,
-			wantInProgress: []*base.TaskMessage{},
-			wantEnqueued: map[string][]*base.TaskMessage{
-				base.DefaultQueueName: {t1, t2, t3},
-				"critical":            {t4},
-				"low":                 {t5},
-			},
-		},
-	}
-
-	for _, tc := range tests {
-		h.FlushDB(t, r.client) // clean up db before each test case
-		h.SeedInProgressQueue(t, r.client, tc.inProgress)
-		for qname, msgs := range tc.enqueued {
-			h.SeedEnqueuedQueue(t, r.client, msgs, qname)
-		}
-
-		got, err := r.RequeueAll()
-		if got != tc.want || err != nil {
-			t.Errorf("(*RDB).RequeueAll() = %v %v, want %v nil", got, err, tc.want)
-			continue
-		}
-
-		gotInProgress := h.GetInProgressMessages(t, r.client)
-		if diff := cmp.Diff(tc.wantInProgress, gotInProgress, h.SortMsgOpt); diff != "" {
-			t.Errorf("mismatch found in %q: (-want, +got):\n%s", base.InProgressQueue, diff)
-		}
-
-		for qname, want := range tc.wantEnqueued {
-			gotEnqueued := h.GetEnqueuedMessages(t, r.client, qname)
-			if diff := cmp.Diff(want, gotEnqueued, h.SortMsgOpt); diff != "" {
-				t.Errorf("mismatch found in %q: (-want, +got):\n%s", base.QueueKey(qname), diff)
-			}
-		}
-	}
-}
-
 func TestCheckAndEnqueue(t *testing.T) {
 	r := setup(t)
 	t1 := h.NewTaskMessage("send_email", nil)
@@ -769,7 +768,6 @@ func TestCheckAndEnqueue(t *testing.T) {
 	tests := []struct {
 		scheduled     []h.ZSetEntry
 		retry         []h.ZSetEntry
-		qnames        []string
 		wantEnqueued  map[string][]*base.TaskMessage
 		wantScheduled []*base.TaskMessage
 		wantRetry     []*base.TaskMessage
@@ -781,7 +779,6 @@ func TestCheckAndEnqueue(t *testing.T) {
 			},
 			retry: []h.ZSetEntry{
 				{Msg: t3, Score: float64(secondAgo.Unix())}},
-			qnames: []string{"default"},
 			wantEnqueued: map[string][]*base.TaskMessage{
 				"default": {t1, t2, t3},
 			},
@@ -794,7 +791,6 @@ func TestCheckAndEnqueue(t *testing.T) {
 				{Msg: t2, Score: float64(secondAgo.Unix())}},
 			retry: []h.ZSetEntry{
 				{Msg: t3, Score: float64(secondAgo.Unix())}},
-			qnames: []string{"default"},
 			wantEnqueued: map[string][]*base.TaskMessage{
 				"default": {t2, t3},
 			},
@@ -807,7 +803,6 @@ func TestCheckAndEnqueue(t *testing.T) {
 				{Msg: t2, Score: float64(hourFromNow.Unix())}},
 			retry: []h.ZSetEntry{
 				{Msg: t3, Score: float64(hourFromNow.Unix())}},
-			qnames: []string{"default"},
 			wantEnqueued: map[string][]*base.TaskMessage{
 				"default": {},
 			},
@@ -821,7 +816,6 @@ func TestCheckAndEnqueue(t *testing.T) {
 			},
 			retry: []h.ZSetEntry{
 				{Msg: t5, Score: float64(secondAgo.Unix())}},
-			qnames: []string{"default", "critical", "low"},
 			wantEnqueued: map[string][]*base.TaskMessage{
 				"default":  {t1},
 				"critical": {t4},
@@ -837,7 +831,7 @@ func TestCheckAndEnqueue(t *testing.T) {
 		h.SeedScheduledQueue(t, r.client, tc.scheduled)
 		h.SeedRetryQueue(t, r.client, tc.retry)
 
-		err := r.CheckAndEnqueue(tc.qnames...)
+		err := r.CheckAndEnqueue()
 		if err != nil {
 			t.Errorf("(*RDB).CheckScheduled() = %v, want nil", err)
 			continue
@@ -864,64 +858,63 @@ func TestCheckAndEnqueue(t *testing.T) {
 
 func TestWriteServerState(t *testing.T) {
 	r := setup(t)
-	queues := map[string]int{"default": 2, "email": 5, "low": 1}
 
-	started := time.Now()
-	ss := base.NewServerState("localhost", 4242, 10, queues, false)
-	ss.SetStarted(started)
-	ss.SetStatus(base.StatusRunning)
-	ttl := 5 * time.Second
-
-	h.FlushDB(t, r.client)
-
-	err := r.WriteServerState(ss, ttl)
+	var (
+		host     = "localhost"
+		pid      = 4242
+		serverID = "server123"
+
+		ttl = 5 * time.Second
+	)
+
+	info := base.ServerInfo{
+		Host:              host,
+		PID:               pid,
+		ServerID:          serverID,
+		Concurrency:       10,
+		Queues:            map[string]int{"default": 2, "email": 5, "low": 1},
+		StrictPriority:    false,
+		Started:           time.Now(),
+		Status:            "running",
+		ActiveWorkerCount: 0,
+	}
+
+	err := r.WriteServerState(&info, nil /* workers */, ttl)
 	if err != nil {
 		t.Errorf("r.WriteServerState returned an error: %v", err)
 	}
 
-	// Check ServerInfo was written correctly
-	info := ss.GetInfo()
-	skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
+	// Check ServerInfo was written correctly.
+	skey := base.ServerInfoKey(host, pid, serverID)
 	data := r.client.Get(skey).Val()
 	var got base.ServerInfo
 	err = json.Unmarshal([]byte(data), &got)
 	if err != nil {
 		t.Fatalf("could not decode json: %v", err)
 	}
-	want := base.ServerInfo{
-		Host:              info.Host,
-		PID:               info.PID,
-		Concurrency:       info.Concurrency,
-		Queues:            map[string]int{"default": 2, "email": 5, "low": 1},
-		StrictPriority:    false,
-		Status:            "running",
-		Started:           started,
-		ActiveWorkerCount: 0,
-	}
-	ignoreOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID")
-	if diff := cmp.Diff(want, got, ignoreOpt); diff != "" {
+	if diff := cmp.Diff(info, got); diff != "" {
 		t.Errorf("persisted ServerInfo was %v, want %v; (-want,+got)\n%s",
-			got, want, diff)
+			got, info, diff)
 	}
-	// Check ServerInfo TTL was set correctly
+	// Check ServerInfo TTL was set correctly.
 	gotTTL := r.client.TTL(skey).Val()
 	if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
 		t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl)
 	}
-	// Check ServerInfo key was added to the set correctly
-	gotProcesses := r.client.ZRange(base.AllServers, 0, -1).Val()
-	wantProcesses := []string{skey}
-	if diff := cmp.Diff(wantProcesses, gotProcesses); diff != "" {
-		t.Errorf("%q contained %v, want %v", base.AllServers, gotProcesses, wantProcesses)
+	// Check ServerInfo key was added to the set all server keys correctly.
+	gotServerKeys := r.client.ZRange(base.AllServers, 0, -1).Val()
+	wantServerKeys := []string{skey}
+	if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" {
+		t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys)
 	}
 
-	// Check WorkersInfo was written correctly
-	wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
+	// Check WorkersInfo was written correctly.
+	wkey := base.WorkersKey(host, pid, serverID)
 	workerExist := r.client.Exists(wkey).Val()
 	if workerExist != 0 {
 		t.Errorf("%q key exists", wkey)
 	}
-	// Check WorkersInfo key was added to the set correctly
+	// Check WorkersInfo key was added to the set correctly.
 	gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
 	wantWorkerKeys := []string{wkey}
 	if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
@@ -931,109 +924,105 @@ func TestWriteServerState(t *testing.T) {
 
 func TestWriteServerStateWithWorkers(t *testing.T) {
 	r := setup(t)
-	queues := map[string]int{"default": 2, "email": 5, "low": 1}
-	concurrency := 10
 
-	started := time.Now().Add(-10 * time.Minute)
-	w1Started := time.Now().Add(-time.Minute)
-	w2Started := time.Now().Add(-time.Second)
-	msg1 := h.NewTaskMessage("send_email", map[string]interface{}{"user_id": "123"})
-	msg2 := h.NewTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/imgfile"})
-	ss := base.NewServerState("127.0.01", 4242, concurrency, queues, false)
-	ss.SetStarted(started)
-	ss.SetStatus(base.StatusRunning)
-	ss.AddWorkerStats(msg1, w1Started)
-	ss.AddWorkerStats(msg2, w2Started)
-	ttl := 5 * time.Second
-
-	h.FlushDB(t, r.client)
-
-	err := r.WriteServerState(ss, ttl)
-	if err != nil {
-		t.Errorf("r.WriteServerState returned an error: %v", err)
-	}
-
-	// Check ServerInfo was written correctly
-	info := ss.GetInfo()
-	skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
+	var (
+		host     = "127.0.0.1"
+		pid      = 4242
+		serverID = "server123"
+
+		msg1 = h.NewTaskMessage("send_email", map[string]interface{}{"user_id": "123"})
+		msg2 = h.NewTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/imgfile"})
+
+		ttl = 5 * time.Second
+	)
+
+	workers := []*base.WorkerInfo{
+		{
+			Host:    host,
+			PID:     pid,
+			ID:      msg1.ID.String(),
+			Type:    msg1.Type,
+			Queue:   msg1.Queue,
+			Payload: msg1.Payload,
+			Started: time.Now().Add(-10 * time.Second),
+		},
+		{
+			Host:    host,
+			PID:     pid,
+			ID:      msg2.ID.String(),
+			Type:    msg2.Type,
+			Queue:   msg2.Queue,
+			Payload: msg2.Payload,
+			Started: time.Now().Add(-2 * time.Minute),
+		},
+	}
+
+	serverInfo := base.ServerInfo{
+		Host:              host,
+		PID:               pid,
+		ServerID:          serverID,
+		Concurrency:       10,
+		Queues:            map[string]int{"default": 2, "email": 5, "low": 1},
+		StrictPriority:    false,
+		Started:           time.Now().Add(-10 * time.Minute),
+		Status:            "running",
+		ActiveWorkerCount: len(workers),
+	}
+
+	err := r.WriteServerState(&serverInfo, workers, ttl)
+	if err != nil {
+		t.Fatalf("r.WriteServerState returned an error: %v", err)
+	}
+
+	// Check ServerInfo was written correctly.
+	skey := base.ServerInfoKey(host, pid, serverID)
 	data := r.client.Get(skey).Val()
 	var got base.ServerInfo
 	err = json.Unmarshal([]byte(data), &got)
 	if err != nil {
 		t.Fatalf("could not decode json: %v", err)
 	}
-	want := base.ServerInfo{
-		Host:              info.Host,
-		PID:               info.PID,
-		ServerID:          info.ServerID,
-		Concurrency:       concurrency,
-		Queues:            queues,
-		StrictPriority:    false,
-		Status:            "running",
-		Started:           started,
-		ActiveWorkerCount: 2,
-	}
-	if diff := cmp.Diff(want, got); diff != "" {
+	if diff := cmp.Diff(serverInfo, got); diff != "" {
 		t.Errorf("persisted ServerInfo was %v, want %v; (-want,+got)\n%s",
-			got, want, diff)
+			got, serverInfo, diff)
 	}
-	// Check ServerInfo TTL was set correctly
+	// Check ServerInfo TTL was set correctly.
 	gotTTL := r.client.TTL(skey).Val()
 	if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
 		t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl)
 	}
-	// Check ServerInfo key was added to the set correctly
-	gotProcesses := r.client.ZRange(base.AllServers, 0, -1).Val()
-	wantProcesses := []string{skey}
-	if diff := cmp.Diff(wantProcesses, gotProcesses); diff != "" {
-		t.Errorf("%q contained %v, want %v", base.AllServers, gotProcesses, wantProcesses)
+	// Check ServerInfo key was added to the set correctly.
+	gotServerKeys := r.client.ZRange(base.AllServers, 0, -1).Val()
+	wantServerKeys := []string{skey}
+	if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" {
+		t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys)
 	}
 
-	// Check WorkersInfo was written correctly
-	wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
+	// Check WorkersInfo was written correctly.
+	wkey := base.WorkersKey(host, pid, serverID)
 	wdata := r.client.HGetAll(wkey).Val()
 	if len(wdata) != 2 {
 		t.Fatalf("HGETALL %q returned a hash of size %d, want 2", wkey, len(wdata))
 	}
-	gotWorkers := make(map[string]*base.WorkerInfo)
-	for key, val := range wdata {
+	var gotWorkers []*base.WorkerInfo
+	for _, val := range wdata {
 		var w base.WorkerInfo
 		if err := json.Unmarshal([]byte(val), &w); err != nil {
 			t.Fatalf("could not unmarshal worker's data: %v", err)
 		}
-		gotWorkers[key] = &w
+		gotWorkers = append(gotWorkers, &w)
 	}
-	wantWorkers := map[string]*base.WorkerInfo{
-		msg1.ID.String(): {
-			Host:    info.Host,
-			PID:     info.PID,
-			ID:      msg1.ID,
-			Type:    msg1.Type,
-			Queue:   msg1.Queue,
-			Payload: msg1.Payload,
-			Started: w1Started,
-		},
-		msg2.ID.String(): {
-			Host:    info.Host,
-			PID:     info.PID,
-			ID:      msg2.ID,
-			Type:    msg2.Type,
-			Queue:   msg2.Queue,
-			Payload: msg2.Payload,
-			Started: w2Started,
-		},
-	}
-	if diff := cmp.Diff(wantWorkers, gotWorkers); diff != "" {
+	if diff := cmp.Diff(workers, gotWorkers, h.SortWorkerInfoOpt); diff != "" {
 		t.Errorf("persisted workers info was %v, want %v; (-want,+got)\n%s",
-			gotWorkers, wantWorkers, diff)
+			gotWorkers, workers, diff)
 	}
 
-	// Check WorkersInfo TTL was set correctly
+	// Check WorkersInfo TTL was set correctly.
 	gotTTL = r.client.TTL(wkey).Val()
-	if !cmp.Equal(ttl, gotTTL, timeCmpOpt) {
+	if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
 		t.Errorf("TTL of %q was %v, want %v", wkey, gotTTL, ttl)
 	}
-	// Check WorkersInfo key was added to the set correctly
+	// Check WorkersInfo key was added to the set correctly.
 	gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
 	wantWorkerKeys := []string{wkey}
 	if diff := cmp.Diff(wantWorkerKeys, gotWorkerKeys); diff != "" {
@@ -1043,51 +1032,96 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
 
 func TestClearServerState(t *testing.T) {
 	r := setup(t)
-	ss := base.NewServerState("127.0.01", 4242, 10, map[string]int{"default": 1}, false)
-	info := ss.GetInfo()
 
-	h.FlushDB(t, r.client)
-
-	skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
-	wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
-	otherSKey := base.ServerInfoKey("otherhost", 12345, "server98")
-	otherWKey := base.WorkersKey("otherhost", 12345, "server98")
-	// Populate the keys.
-	if err := r.client.Set(skey, "process-info", 0).Err(); err != nil {
-		t.Fatal(err)
-	}
-	if err := r.client.HSet(wkey, "worker-key", "worker-info").Err(); err != nil {
-		t.Fatal(err)
-	}
-	if err := r.client.ZAdd(base.AllServers, &redis.Z{Member: skey}).Err(); err != nil {
-		t.Fatal(err)
-	}
-	if err := r.client.ZAdd(base.AllServers, &redis.Z{Member: otherSKey}).Err(); err != nil {
-		t.Fatal(err)
-	}
-	if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Member: wkey}).Err(); err != nil {
-		t.Fatal(err)
-	}
-	if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Member: otherWKey}).Err(); err != nil {
-		t.Fatal(err)
-	}
-
-	err := r.ClearServerState(ss)
+	var (
+		host     = "127.0.0.1"
+		pid      = 1234
+		serverID = "server123"
+
+		otherHost     = "127.0.0.2"
+		otherPID      = 9876
+		otherServerID = "server987"
+
+		msg1 = h.NewTaskMessage("send_email", map[string]interface{}{"user_id": "123"})
+		msg2 = h.NewTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/imgfile"})
+
+		ttl = 5 * time.Second
+	)
+
+	workers1 := []*base.WorkerInfo{
+		{
+			Host:    host,
+			PID:     pid,
+			ID:      msg1.ID.String(),
+			Type:    msg1.Type,
+			Queue:   msg1.Queue,
+			Payload: msg1.Payload,
+			Started: time.Now().Add(-10 * time.Second),
+		},
+	}
+	serverInfo1 := base.ServerInfo{
+		Host:              host,
+		PID:               pid,
+		ServerID:          serverID,
+		Concurrency:       10,
+		Queues:            map[string]int{"default": 2, "email": 5, "low": 1},
+		StrictPriority:    false,
+		Started:           time.Now().Add(-10 * time.Minute),
+		Status:            "running",
+		ActiveWorkerCount: len(workers1),
+	}
+
+	workers2 := []*base.WorkerInfo{
+		{
+			Host:    otherHost,
+			PID:     otherPID,
+			ID:      msg2.ID.String(),
+			Type:    msg2.Type,
+			Queue:   msg2.Queue,
+			Payload: msg2.Payload,
+			Started: time.Now().Add(-30 * time.Second),
+		},
+	}
+	serverInfo2 := base.ServerInfo{
+		Host:              otherHost,
+		PID:               otherPID,
+		ServerID:          otherServerID,
+		Concurrency:       10,
+		Queues:            map[string]int{"default": 2, "email": 5, "low": 1},
+		StrictPriority:    false,
+		Started:           time.Now().Add(-15 * time.Minute),
+		Status:            "running",
+		ActiveWorkerCount: len(workers2),
+	}
+
+	// Write server and workers data.
+	if err := r.WriteServerState(&serverInfo1, workers1, ttl); err != nil {
+		t.Fatalf("could not write server state: %v", err)
+	}
+	if err := r.WriteServerState(&serverInfo2, workers2, ttl); err != nil {
+		t.Fatalf("could not write server state: %v", err)
+	}
+
+	err := r.ClearServerState(host, pid, serverID)
 	if err != nil {
 		t.Fatalf("(*RDB).ClearServerState failed: %v", err)
 	}
 
-	// Check all keys are cleared
+	skey := base.ServerInfoKey(host, pid, serverID)
+	wkey := base.WorkersKey(host, pid, serverID)
+	otherSKey := base.ServerInfoKey(otherHost, otherPID, otherServerID)
+	otherWKey := base.WorkersKey(otherHost, otherPID, otherServerID)
+	// Check all keys are cleared.
 	if r.client.Exists(skey).Val() != 0 {
 		t.Errorf("Redis key %q exists", skey)
 	}
 	if r.client.Exists(wkey).Val() != 0 {
 		t.Errorf("Redis key %q exists", wkey)
 	}
-	gotProcessKeys := r.client.ZRange(base.AllServers, 0, -1).Val()
-	wantProcessKeys := []string{otherSKey}
-	if diff := cmp.Diff(wantProcessKeys, gotProcessKeys); diff != "" {
-		t.Errorf("%q contained %v, want %v", base.AllServers, gotProcessKeys, wantProcessKeys)
+	gotServerKeys := r.client.ZRange(base.AllServers, 0, -1).Val()
+	wantServerKeys := []string{otherSKey}
+	if diff := cmp.Diff(wantServerKeys, gotServerKeys); diff != "" {
+		t.Errorf("%q contained %v, want %v", base.AllServers, gotServerKeys, wantServerKeys)
 	}
 	gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
 	wantWorkerKeys := []string{otherWKey}
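The server-state tests above reflect the new WriteServerState/ClearServerState signatures: callers now build base.ServerInfo and []*base.WorkerInfo values directly instead of passing a *base.ServerState handle. A short sketch under those assumptions (field values are illustrative only, and r is an already-constructed *RDB):

	// Sketch only: register this server, then remove its state on shutdown.
	info := base.ServerInfo{
		Host:        "localhost",
		PID:         os.Getpid(),
		ServerID:    "abc123", // illustrative ID
		Concurrency: 10,
		Queues:      map[string]int{"default": 1},
		Started:     time.Now(),
		Status:      "running",
	}
	if err := r.WriteServerState(&info, nil /* no active workers */, 10*time.Second); err != nil {
		log.Fatal(err)
	}
	// ... on shutdown:
	if err := r.ClearServerState(info.Host, info.PID, info.ServerID); err != nil {
		log.Fatal(err)
	}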
@@ -123,16 +123,7 @@ func (tb *TestBroker) Kill(msg *base.TaskMessage, errMsg string) error {
 	return tb.real.Kill(msg, errMsg)
 }
 
-func (tb *TestBroker) RequeueAll() (int64, error) {
-	tb.mu.Lock()
-	defer tb.mu.Unlock()
-	if tb.sleeping {
-		return 0, errRedisDown
-	}
-	return tb.real.RequeueAll()
-}
-
-func (tb *TestBroker) CheckAndEnqueue(qnames ...string) error {
+func (tb *TestBroker) CheckAndEnqueue() error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
@@ -141,22 +132,22 @@ func (tb *TestBroker) CheckAndEnqueue(qnames ...string) error {
 	return tb.real.CheckAndEnqueue()
 }
 
-func (tb *TestBroker) WriteServerState(ss *base.ServerState, ttl time.Duration) error {
+func (tb *TestBroker) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.WriteServerState(ss, ttl)
+	return tb.real.WriteServerState(info, workers, ttl)
 }
 
-func (tb *TestBroker) ClearServerState(ss *base.ServerState) error {
+func (tb *TestBroker) ClearServerState(host string, pid int, serverID string) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.ClearServerState(ss)
+	return tb.real.ClearServerState(host, pid, serverID)
 }
 
 func (tb *TestBroker) CancelationPubSub() (*redis.PubSub, error) {
64 payload.go
@@ -5,6 +5,7 @@
 package asynq
 
 import (
+	"encoding/json"
 	"fmt"
 	"time"
 
@@ -30,6 +31,19 @@ func (p Payload) Has(key string) bool {
 	return ok
 }
 
+func toInt(v interface{}) (int, error) {
+	switch v := v.(type) {
+	case json.Number:
+		val, err := v.Int64()
+		if err != nil {
+			return 0, err
+		}
+		return int(val), nil
+	default:
+		return cast.ToIntE(v)
+	}
+}
+
 // GetString returns a string value if a string type is associated with
 // the key, otherwise reports an error.
 func (p Payload) GetString(key string) (string, error) {
@@ -47,7 +61,7 @@ func (p Payload) GetInt(key string) (int, error) {
 	if !ok {
 		return 0, &errKeyNotFound{key}
 	}
-	return cast.ToIntE(v)
+	return toInt(v)
 }
 
 // GetFloat64 returns a float64 value if a numeric type is associated with
@@ -57,7 +71,12 @@ func (p Payload) GetFloat64(key string) (float64, error) {
 	if !ok {
 		return 0, &errKeyNotFound{key}
 	}
-	return cast.ToFloat64E(v)
+	switch v := v.(type) {
+	case json.Number:
+		return v.Float64()
+	default:
+		return cast.ToFloat64E(v)
+	}
 }
 
 // GetBool returns a boolean value if a boolean type is associated with
@@ -87,7 +106,20 @@ func (p Payload) GetIntSlice(key string) ([]int, error) {
 	if !ok {
 		return nil, &errKeyNotFound{key}
 	}
-	return cast.ToIntSliceE(v)
+	switch v := v.(type) {
+	case []interface{}:
+		var res []int
+		for _, elem := range v {
+			val, err := toInt(elem)
+			if err != nil {
+				return nil, err
+			}
+			res = append(res, int(val))
+		}
+		return res, nil
+	default:
+		return cast.ToIntSliceE(v)
+	}
 }
 
 // GetStringMap returns a map of string to empty interface
@@ -131,7 +163,20 @@ func (p Payload) GetStringMapInt(key string) (map[string]int, error) {
 	if !ok {
 		return nil, &errKeyNotFound{key}
 	}
-	return cast.ToStringMapIntE(v)
+	switch v := v.(type) {
+	case map[string]interface{}:
+		res := make(map[string]int)
+		for key, val := range v {
+			ival, err := toInt(val)
+			if err != nil {
+				return nil, err
+			}
+			res[key] = ival
+		}
+		return res, nil
+	default:
+		return cast.ToStringMapIntE(v)
+	}
 }
 
 // GetStringMapBool returns a map of string to boolean
@@ -162,5 +207,14 @@ func (p Payload) GetDuration(key string) (time.Duration, error) {
 	if !ok {
 		return 0, &errKeyNotFound{key}
 	}
-	return cast.ToDurationE(v)
+	switch v := v.(type) {
+	case json.Number:
+		val, err := v.Int64()
+		if err != nil {
+			return 0, err
+		}
+		return time.Duration(val), nil
+	default:
+		return cast.ToDurationE(v)
+	}
}
|
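The json.Number branches above exist because encoding/json decodes any JSON number held in an interface{} as a float64 by default, which silently corrupts integers larger than 2^53 (the overflow behind issue #166). The following is a minimal standalone sketch using only the standard library; it is not asynq's own EncodeMessage/DecodeMessage, just an illustration of the failure mode and of the json.Number round trip that toInt relies on:

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	data := []byte(`{"data": 111111111111111111}`) // > 2^53, same value as the regression test below

    	// Default decoding: the number arrives as a float64 and precision is already lost.
    	var plain map[string]interface{}
    	if err := json.Unmarshal(data, &plain); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%T %.0f\n", plain["data"], plain["data"]) // float64 111111111111111104

    	// Decoding with UseNumber keeps the literal as a json.Number,
    	// so Int64 recovers the exact value (this is the case toInt handles).
    	dec := json.NewDecoder(bytes.NewReader(data))
    	dec.UseNumber()
    	var exact map[string]interface{}
    	if err := dec.Decode(&exact); err != nil {
    		panic(err)
    	}
    	n, err := exact["data"].(json.Number).Int64()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(n) // 111111111111111111
    }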
payload_test.go

@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
 	h "github.com/hibiken/asynq/internal/asynqtest"
 	"github.com/hibiken/asynq/internal/base"
 )
@@ -40,12 +41,11 @@ func TestPayloadString(t *testing.T) {
 
 		// encode and then decode task messsage.
 		in := h.NewTaskMessage("testing", tc.data)
-		b, err := json.Marshal(in)
+		encoded, err := base.EncodeMessage(in)
 		if err != nil {
 			t.Fatal(err)
 		}
-		var out base.TaskMessage
-		err = json.Unmarshal(b, &out)
+		out, err := base.DecodeMessage(encoded)
 		if err != nil {
 			t.Fatal(err)
 		}

The same json.Marshal/json.Unmarshal to base.EncodeMessage/base.DecodeMessage replacement is applied in TestPayloadInt, TestPayloadFloat64, TestPayloadBool, TestPayloadStringSlice, TestPayloadIntSlice, TestPayloadStringMap, TestPayloadStringMapString, TestPayloadStringMapStringSlice, TestPayloadStringMapInt, TestPayloadStringMapBool, TestPayloadTime, and TestPayloadDuration. TestPayloadStringMap additionally ignores json.Number map entries when comparing:

@@ -315,21 +310,28 @@ func TestPayloadStringMap(t *testing.T) {
 		payload = Payload{out.Payload}
 		got, err = payload.GetStringMap(tc.key)
-		diff = cmp.Diff(got, tc.data[tc.key])
+		ignoreOpt := cmpopts.IgnoreMapEntries(func(key string, val interface{}) bool {
+			switch val.(type) {
+			case json.Number:
+				return true
+			default:
+				return false
+			}
+		})
+		diff = cmp.Diff(got, tc.data[tc.key], ignoreOpt)
 		if err != nil || diff != "" {
-			t.Errorf("With Marshaling: Payload.GetStringMap(%q) = %v, %v, want %v, nil",
-				tc.key, got, err, tc.data[tc.key])
+			t.Errorf("With Marshaling: Payload.GetStringMap(%q) = %v, %v, want %v, nil;(-want,+got)\n%s",
+				tc.key, got, err, tc.data[tc.key], diff)
 		}
 
 		// access non-existent key.
processor.go (73 changed lines)

@@ -22,8 +22,6 @@ type processor struct {
 	logger *log.Logger
 	broker base.Broker
 
-	ss *base.ServerState
-
 	handler Handler
 
 	queueConfig map[string]int
@@ -60,6 +58,9 @@ type processor struct {
 
 	// cancelations is a set of cancel functions for all in-progress tasks.
 	cancelations *base.Cancelations
+
+	starting chan<- *base.TaskMessage
+	finished chan<- *base.TaskMessage
 }
 
 type retryDelayFunc func(n int, err error, task *Task) time.Duration
@@ -67,38 +68,42 @@ type retryDelayFunc func(n int, err error, task *Task) time.Duration
 type processorParams struct {
 	logger          *log.Logger
 	broker          base.Broker
-	ss              *base.ServerState
 	retryDelayFunc  retryDelayFunc
 	syncCh          chan<- *syncRequest
 	cancelations    *base.Cancelations
+	concurrency     int
+	queues          map[string]int
+	strictPriority  bool
 	errHandler      ErrorHandler
 	shutdownTimeout time.Duration
+	starting        chan<- *base.TaskMessage
+	finished        chan<- *base.TaskMessage
 }
 
 // newProcessor constructs a new processor.
 func newProcessor(params processorParams) *processor {
-	info := params.ss.GetInfo()
-	qcfg := normalizeQueueCfg(info.Queues)
+	queues := normalizeQueues(params.queues)
 	orderedQueues := []string(nil)
-	if info.StrictPriority {
-		orderedQueues = sortByPriority(qcfg)
+	if params.strictPriority {
+		orderedQueues = sortByPriority(queues)
 	}
 	return &processor{
 		logger:          params.logger,
 		broker:          params.broker,
-		ss:              params.ss,
-		queueConfig:     qcfg,
+		queueConfig:     queues,
 		orderedQueues:   orderedQueues,
 		retryDelayFunc:  params.retryDelayFunc,
 		syncRequestCh:   params.syncCh,
 		cancelations:    params.cancelations,
 		errLogLimiter:   rate.NewLimiter(rate.Every(3*time.Second), 1),
-		sema:            make(chan struct{}, info.Concurrency),
+		sema:            make(chan struct{}, params.concurrency),
 		done:            make(chan struct{}),
 		abort:           make(chan struct{}),
 		quit:            make(chan struct{}),
 		errHandler:      params.errHandler,
 		handler:         HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
+		starting:        params.starting,
+		finished:        params.finished,
 	}
 }
@@ -132,13 +137,9 @@ func (p *processor) terminate() {
 		p.sema <- struct{}{}
 	}
 	p.logger.Info("All workers have finished")
-	p.restore() // move any unfinished tasks back to the queue.
 }
 
 func (p *processor) start(wg *sync.WaitGroup) {
-	// NOTE: The call to "restore" needs to complete before starting
-	// the processor goroutine.
-	p.restore()
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
@@ -161,14 +162,12 @@ func (p *processor) exec() {
 	msg, err := p.broker.Dequeue(qnames...)
 	switch {
 	case err == rdb.ErrNoProcessableTask:
-		// queues are empty, this is a normal behavior.
-		if len(qnames) > 1 {
-			// sleep to avoid slamming redis and let scheduler move tasks into queues.
-			// Note: With multiple queues, we are not using blocking pop operation and
-			// polling queues instead. This adds significant load to redis.
-			time.Sleep(time.Second)
-		}
 		p.logger.Debug("All queues are empty")
+		// Queues are empty, this is a normal behavior.
+		// Sleep to avoid slamming redis and let scheduler move tasks into queues.
+		// Note: We are not using blocking pop operation and polling queues instead.
+		// This adds significant load to redis.
+		time.Sleep(time.Second)
 		return
 	case err != nil:
 		if p.errLogLimiter.Allow() {
@@ -183,10 +182,10 @@ func (p *processor) exec() {
 		p.requeue(msg)
 		return
 	case p.sema <- struct{}{}: // acquire token
-		p.ss.AddWorkerStats(msg, time.Now())
+		p.starting <- msg
 		go func() {
 			defer func() {
-				p.ss.DeleteWorkerStats(msg)
+				p.finished <- msg
 				<-p.sema // release token
 			}()
 
@@ -203,8 +202,9 @@ func (p *processor) exec() {
 
 			select {
 			case <-p.quit:
-				// time is up, quit this worker goroutine.
+				// time is up, push the message back to queue and quit this worker goroutine.
 				p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
+				p.requeue(msg)
 				return
 			case resErr := <-resCh:
 				// Note: One of three things should happen.
@@ -228,29 +228,19 @@ func (p *processor) exec() {
 		}
 	}
 
-// restore moves all tasks from "in-progress" back to queue
-// to restore all unfinished tasks.
-func (p *processor) restore() {
-	n, err := p.broker.RequeueAll()
-	if err != nil {
-		p.logger.Errorf("Could not restore unfinished tasks: %v", err)
-	}
-	if n > 0 {
-		p.logger.Infof("Restored %d unfinished tasks back to queue", n)
-	}
-}
-
 func (p *processor) requeue(msg *base.TaskMessage) {
 	err := p.broker.Requeue(msg)
 	if err != nil {
 		p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err)
+	} else {
+		p.logger.Infof("Pushed task id=%s back to queue", msg.ID)
 	}
 }
 
 func (p *processor) markAsDone(msg *base.TaskMessage) {
 	err := p.broker.Done(msg)
 	if err != nil {
-		errMsg := fmt.Sprintf("Could not remove task id=%s from %q", msg.ID, base.InProgressQueue)
+		errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.InProgressQueue, err)
 		p.logger.Warnf("%s; Will retry syncing", errMsg)
 		p.syncRequestCh <- &syncRequest{
 			fn: func() error {
@@ -374,16 +364,15 @@ func (x byPriority) Len() int          { return len(x) }
 func (x byPriority) Less(i, j int) bool { return x[i].priority < x[j].priority }
 func (x byPriority) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
 
-// normalizeQueueCfg divides priority numbers by their
-// greatest common divisor.
-func normalizeQueueCfg(queueCfg map[string]int) map[string]int {
+// normalizeQueues divides priority numbers by their greatest common divisor.
+func normalizeQueues(queues map[string]int) map[string]int {
 	var xs []int
-	for _, x := range queueCfg {
+	for _, x := range queues {
 		xs = append(xs, x)
 	}
 	d := gcd(xs...)
 	res := make(map[string]int)
-	for q, x := range queueCfg {
+	for q, x := range queues {
 		res[q] = x / d
 	}
 	return res
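Worker bookkeeping now flows over the starting and finished channels instead of through a shared *base.ServerState. The sketch below is a simplified, self-contained illustration of that hand-off pattern (the names and the tracker are illustrative, not asynq's actual heartbeater): each worker announces a task before and after running it, and a single goroutine owns the in-flight set, so no shared state needs locking. The processor tests that follow stub the receiving side the same way with fakeHeartbeater.

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    type task struct{ id string }

    // tracker is the only goroutine that touches the in-flight map.
    func tracker(starting, finished <-chan task, done <-chan struct{}) {
    	inFlight := make(map[string]time.Time)
    	for {
    		select {
    		case t := <-starting:
    			inFlight[t.id] = time.Now()
    		case t := <-finished:
    			delete(inFlight, t.id)
    		case <-done:
    			fmt.Println("still in flight at shutdown:", len(inFlight))
    			return
    		}
    	}
    }

    func main() {
    	starting := make(chan task)
    	finished := make(chan task)
    	done := make(chan struct{})
    	go tracker(starting, finished, done)

    	var wg sync.WaitGroup
    	for _, id := range []string{"a", "b", "c"} {
    		wg.Add(1)
    		go func(id string) {
    			defer wg.Done()
    			starting <- task{id}              // corresponds to p.starting <- msg
    			time.Sleep(10 * time.Millisecond) // pretend to process the task
    			finished <- task{id}              // corresponds to p.finished <- msg
    		}(id)
    	}
    	wg.Wait()
    	close(done)
    }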
processor_test.go

@@ -19,6 +19,29 @@ import (
 	"github.com/hibiken/asynq/internal/rdb"
 )
 
+// fakeHeartbeater receives from starting and finished channels and do nothing.
+func fakeHeartbeater(starting, finished <-chan *base.TaskMessage, done <-chan struct{}) {
+	for {
+		select {
+		case <-starting:
+		case <-finished:
+		case <-done:
+			return
+		}
+	}
+}
+
+// fakeSyncer receives from sync channel and do nothing.
+func fakeSyncer(syncCh <-chan *syncRequest, done <-chan struct{}) {
+	for {
+		select {
+		case <-syncCh:
+		case <-done:
+			return
+		}
+	}
+}
+
 func TestProcessorSuccess(t *testing.T) {
 	r := setup(t)
 	rdbClient := rdb.NewRDB(r)
@@ -63,16 +86,26 @@ func TestProcessorSuccess(t *testing.T) {
 			processed = append(processed, task)
 			return nil
 		}
-		ss := base.NewServerState("localhost", 1234, 10, defaultQueueConfig, false)
+		starting := make(chan *base.TaskMessage)
+		finished := make(chan *base.TaskMessage)
+		syncCh := make(chan *syncRequest)
+		done := make(chan struct{})
+		defer func() { close(done) }()
+		go fakeHeartbeater(starting, finished, done)
+		go fakeSyncer(syncCh, done)
 		p := newProcessor(processorParams{
 			logger:          testLogger,
 			broker:          rdbClient,
-			ss:              ss,
 			retryDelayFunc:  defaultDelayFunc,
-			syncCh:          nil,
+			syncCh:          syncCh,
 			cancelations:    base.NewCancelations(),
+			concurrency:     10,
+			queues:          defaultQueueConfig,
+			strictPriority:  false,
 			errHandler:      nil,
 			shutdownTimeout: defaultShutdownTimeout,
+			starting:        starting,
+			finished:        finished,
 		})
 		p.handler = HandlerFunc(handler)
 
@@ -84,16 +117,90 @@ func TestProcessorSuccess(t *testing.T) {
 				t.Fatal(err)
 			}
 		}
-		time.Sleep(time.Second) // wait for one second to allow all enqueued tasks to be processed.
-		p.terminate()
-
-		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Payload{})); diff != "" {
-			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
-		}
-
+		time.Sleep(2 * time.Second) // wait for two second to allow all enqueued tasks to be processed.
 		if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
 			t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
 		}
+		p.terminate()
+
+		mu.Lock()
+		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmp.AllowUnexported(Payload{})); diff != "" {
+			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
+		}
+		mu.Unlock()
+	}
+}
+
+// https://github.com/hibiken/asynq/issues/166
+func TestProcessTasksWithLargeNumberInPayload(t *testing.T) {
+	r := setup(t)
+	rdbClient := rdb.NewRDB(r)
+
+	m1 := h.NewTaskMessage("large_number", map[string]interface{}{"data": 111111111111111111})
+	t1 := NewTask(m1.Type, m1.Payload)
+
+	tests := []struct {
+		enqueued      []*base.TaskMessage // initial default queue state
+		wantProcessed []*Task             // tasks to be processed at the end
+	}{
+		{
+			enqueued:      []*base.TaskMessage{m1},
+			wantProcessed: []*Task{t1},
+		},
+	}
+
+	for _, tc := range tests {
+		h.FlushDB(t, r)                        // clean up db before each test case.
+		h.SeedEnqueuedQueue(t, r, tc.enqueued) // initialize default queue.
+
+		var mu sync.Mutex
+		var processed []*Task
+		handler := func(ctx context.Context, task *Task) error {
+			mu.Lock()
+			defer mu.Unlock()
+			if data, err := task.Payload.GetInt("data"); err != nil {
+				t.Errorf("coult not get data from payload: %v", err)
+			} else {
+				t.Logf("data == %d", data)
+			}
+			processed = append(processed, task)
+			return nil
+		}
+		starting := make(chan *base.TaskMessage)
+		finished := make(chan *base.TaskMessage)
+		syncCh := make(chan *syncRequest)
+		done := make(chan struct{})
+		defer func() { close(done) }()
+		go fakeHeartbeater(starting, finished, done)
+		go fakeSyncer(syncCh, done)
+		p := newProcessor(processorParams{
+			logger:          testLogger,
+			broker:          rdbClient,
+			retryDelayFunc:  defaultDelayFunc,
+			syncCh:          syncCh,
+			cancelations:    base.NewCancelations(),
+			concurrency:     10,
+			queues:          defaultQueueConfig,
+			strictPriority:  false,
+			errHandler:      nil,
+			shutdownTimeout: defaultShutdownTimeout,
+			starting:        starting,
+			finished:        finished,
+		})
+		p.handler = HandlerFunc(handler)
+
+		p.start(&sync.WaitGroup{})
+		time.Sleep(2 * time.Second) // wait for two second to allow all enqueued tasks to be processed.
+		if l := r.LLen(base.InProgressQueue).Val(); l != 0 {
+			t.Errorf("%q has %d tasks, want 0", base.InProgressQueue, l)
+		}
+		p.terminate()
+
+		mu.Lock()
+		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt, cmpopts.IgnoreUnexported(Payload{})); diff != "" {
+			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
+		}
+		mu.Unlock()
 	}
 }
 
@@ -140,7 +247,7 @@ func TestProcessorRetry(t *testing.T) {
 			handler: HandlerFunc(func(ctx context.Context, task *Task) error {
 				return fmt.Errorf(errMsg)
 			}),
-			wait: time.Second,
+			wait: 2 * time.Second,
 			wantRetry: []h.ZSetEntry{
 				{Msg: &r2, Score: float64(now.Add(time.Minute).Unix())},
 				{Msg: &r3, Score: float64(now.Add(time.Minute).Unix())},
@@ -189,7 +304,7 @@ func TestProcessorRetry(t *testing.T) {
 				t.Fatal(err)
 			}
 		}
-		time.Sleep(tc.wait)
+		time.Sleep(tc.wait) // FIXME: This makes test flaky.
 		p.terminate()
 
 		cmpOpt := cmpopts.EquateApprox(0, float64(time.Second)) // allow up to a second difference in zset score

TestProcessorRetry, TestProcessorQueues, and TestProcessorWithStrictPriority receive the same setup change as TestProcessorSuccess: the base.NewServerState(...) line is replaced by starting/finished/done channels plus a fakeHeartbeater goroutine, the ss field is dropped from processorParams, and concurrency, queues, and strictPriority are passed explicitly (10/defaultQueueConfig/false, 10/tc.queueCfg/false, and 1/queueCfg/true respectively, with the "process tasks one at a time" note moved onto the concurrency field).

@@ -412,7 +542,7 @@ func TestGCD(t *testing.T) {
 	}
 }
 
-func TestNormalizeQueueCfg(t *testing.T) {
+func TestNormalizeQueues(t *testing.T) {
 	tests := []struct {
 		input map[string]int
 		want  map[string]int
@@ -462,9 +592,9 @@ func TestNormalizeQueueCfg(t *testing.T) {
 	}
 
 	for _, tc := range tests {
-		got := normalizeQueueCfg(tc.input)
+		got := normalizeQueues(tc.input)
 		if diff := cmp.Diff(tc.want, got); diff != "" {
-			t.Errorf("normalizeQueueCfg(%v) = %v, want %v; (-want, +got):\n%s",
+			t.Errorf("normalizeQueues(%v) = %v, want %v; (-want, +got):\n%s",
 				tc.input, got, tc.want, diff)
 		}
 	}
scheduler.go (11 changed lines)

@@ -21,29 +21,20 @@ type scheduler struct {
 
 	// poll interval on average
 	avgInterval time.Duration
-
-	// list of queues to move the tasks into.
-	qnames []string
 }
 
 type schedulerParams struct {
 	logger   *log.Logger
 	broker   base.Broker
 	interval time.Duration
-	queues   map[string]int
 }
 
 func newScheduler(params schedulerParams) *scheduler {
-	var qnames []string
-	for q := range params.queues {
-		qnames = append(qnames, q)
-	}
 	return &scheduler{
 		logger:      params.logger,
 		broker:      params.broker,
 		done:        make(chan struct{}),
 		avgInterval: params.interval,
-		qnames:      qnames,
 	}
 }
@@ -71,7 +62,7 @@ func (s *scheduler) start(wg *sync.WaitGroup) {
 }
 
 func (s *scheduler) exec() {
-	if err := s.broker.CheckAndEnqueue(s.qnames...); err != nil {
+	if err := s.broker.CheckAndEnqueue(); err != nil {
 		s.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
 	}
 }
scheduler_test.go

@@ -23,7 +23,6 @@ func TestScheduler(t *testing.T) {
 		logger:   testLogger,
 		broker:   rdbClient,
 		interval: pollInterval,
-		queues:   defaultQueueConfig,
 	})
 	t1 := h.NewTaskMessage("gen_thumbnail", nil)
 	t2 := h.NewTaskMessage("send_email", nil)
server.go (48 changed lines)

@@ -10,7 +10,6 @@ import (
 	"fmt"
 	"math"
 	"math/rand"
-	"os"
 	"runtime"
 	"strings"
 	"sync"
@@ -34,12 +33,12 @@ import (
 // (e.g., queue size reaches a certain limit, or the task has been in the
 // queue for a certain amount of time).
 type Server struct {
-	ss *base.ServerState
-
 	logger *log.Logger
 
 	broker base.Broker
 
+	status *base.ServerStatus
+
 	// wait group to wait for all goroutines to finish.
 	wg        sync.WaitGroup
 	scheduler *scheduler
@@ -283,15 +282,11 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 	}
 	logger.SetLevel(toInternalLogLevel(loglevel))
 
-	host, err := os.Hostname()
-	if err != nil {
-		host = "unknown-host"
-	}
-	pid := os.Getpid()
-
 	rdb := rdb.NewRDB(createRedisClient(r))
-	ss := base.NewServerState(host, pid, n, queues, cfg.StrictPriority)
+	starting := make(chan *base.TaskMessage)
+	finished := make(chan *base.TaskMessage)
 	syncCh := make(chan *syncRequest)
+	status := base.NewServerStatus(base.StatusIdle)
 	cancels := base.NewCancelations()
 
 	syncer := newSyncer(syncerParams{
@@ -300,16 +295,20 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 		interval: 5 * time.Second,
 	})
 	heartbeater := newHeartbeater(heartbeaterParams{
-		logger:      logger,
-		broker:      rdb,
-		serverState: ss,
-		interval:    5 * time.Second,
+		logger:         logger,
+		broker:         rdb,
+		interval:       5 * time.Second,
+		concurrency:    n,
+		queues:         queues,
+		strictPriority: cfg.StrictPriority,
+		status:         status,
+		starting:       starting,
+		finished:       finished,
 	})
 	scheduler := newScheduler(schedulerParams{
 		logger:   logger,
 		broker:   rdb,
 		interval: 5 * time.Second,
-		queues:   queues,
 	})
 	subscriber := newSubscriber(subscriberParams{
 		logger: logger,
@@ -319,17 +318,21 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 	processor := newProcessor(processorParams{
 		logger:          logger,
 		broker:          rdb,
-		ss:              ss,
 		retryDelayFunc:  delayFunc,
 		syncCh:          syncCh,
 		cancelations:    cancels,
+		concurrency:     n,
+		queues:          queues,
+		strictPriority:  cfg.StrictPriority,
 		errHandler:      cfg.ErrorHandler,
 		shutdownTimeout: shutdownTimeout,
+		starting:        starting,
+		finished:        finished,
 	})
 	return &Server{
-		ss:        ss,
 		logger:    logger,
 		broker:    rdb,
+		status:    status,
 		scheduler: scheduler,
 		processor: processor,
 		syncer:    syncer,
@@ -390,13 +393,13 @@ func (srv *Server) Start(handler Handler) error {
 	if handler == nil {
 		return fmt.Errorf("asynq: server cannot run with nil handler")
 	}
-	switch srv.ss.Status() {
+	switch srv.status.Get() {
 	case base.StatusRunning:
 		return fmt.Errorf("asynq: the server is already running")
 	case base.StatusStopped:
 		return ErrServerStopped
 	}
-	srv.ss.SetStatus(base.StatusRunning)
+	srv.status.Set(base.StatusRunning)
 	srv.processor.handler = handler
 
 	srv.logger.Info("Starting processing")
@@ -414,7 +417,7 @@ func (srv *Server) Start(handler Handler) error {
 // active workers to finish processing tasks for duration specified in Config.ShutdownTimeout.
 // If worker didn't finish processing a task during the timeout, the task will be pushed back to Redis.
 func (srv *Server) Stop() {
-	switch srv.ss.Status() {
+	switch srv.status.Get() {
 	case base.StatusIdle, base.StatusStopped:
 		// server is not running, do nothing and return.
 		return
@@ -424,6 +427,7 @@ func (srv *Server) Stop() {
 	// Note: The order of termination is important.
 	// Sender goroutines should be terminated before the receiver goroutines.
 	// processor -> syncer (via syncCh)
+	// processor -> heartbeater (via starting, finished channels)
 	srv.scheduler.terminate()
 	srv.processor.terminate()
 	srv.syncer.terminate()
@@ -433,7 +437,7 @@ func (srv *Server) Stop() {
 	srv.wg.Wait()
 
 	srv.broker.Close()
-	srv.ss.SetStatus(base.StatusStopped)
+	srv.status.Set(base.StatusStopped)
 
 	srv.logger.Info("Exiting")
 }
@@ -443,6 +447,6 @@ func (srv *Server) Stop() {
 func (srv *Server) Quiet() {
 	srv.logger.Info("Stopping processor")
 	srv.processor.stop()
-	srv.ss.SetStatus(base.StatusQuiet)
+	srv.status.Set(base.StatusQuiet)
 	srv.logger.Info("Processor stopped")
 }
tools/asynq/README.md

@@ -14,6 +14,7 @@ Asynq CLI is a command line tool to monitor the tasks managed by `asynq` package
 - [Delete](#delete)
 - [Kill](#kill)
 - [Cancel](#cancel)
+- [Pause](#pause)
 - [Config File](#config-file)
 
 ## Installation
@@ -144,6 +145,17 @@ Example:
 
     asynq cancel bnogo8gt6toe23vhef0g
 
+### Pause
+
+Command `pause` pauses the specified queue. Tasks in paused queues are not processed by servers.
+To resume processing from the queue, use the `unpause` command.
+To see which queues are currently paused, use the `stats` command.
+
+Example:
+
+    asynq pause email
+    asynq unpause email
+
 ## Config File
 
 You can use a config file to set default values for the flags.
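Since the stats output marks paused queues (via the queueTitle helper added to tools/asynq/cmd/stats.go below), a quick way to confirm a pause took effect could look like this; the exact column layout is illustrative:

    asynq pause email
    asynq stats      # the "Email" queue column is now titled "Email (Paused)"
    asynq unpause email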
tools/asynq/cmd/pause.go (new file, 47 lines)

@@ -0,0 +1,47 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package cmd
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/go-redis/redis/v7"
+	"github.com/hibiken/asynq/internal/rdb"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+)
+
+// pauseCmd represents the pause command
+var pauseCmd = &cobra.Command{
+	Use:   "pause [queue name]",
+	Short: "Pauses the specified queue",
+	Long: `Pause (asynq pause) will pause the specified queue.
+Asynq servers will not process tasks from paused queues.
+Use the "unpause" command to resume a paused queue.
+
+Example: asynq pause default -> Pause the "default" queue`,
+	Args: cobra.ExactValidArgs(1),
+	Run:  pause,
+}
+
+func init() {
+	rootCmd.AddCommand(pauseCmd)
+}
+
+func pause(cmd *cobra.Command, args []string) {
+	c := redis.NewClient(&redis.Options{
+		Addr:     viper.GetString("uri"),
+		DB:       viper.GetInt("db"),
+		Password: viper.GetString("password"),
+	})
+	r := rdb.NewRDB(c)
+	err := r.Pause(args[0])
+	if err != nil {
+		fmt.Printf("error: %v\n", err)
+		os.Exit(1)
+	}
+	fmt.Printf("Successfully paused queue %q\n", args[0])
+}
tools/asynq/cmd/stats.go

@@ -7,7 +7,6 @@ package cmd
 import (
 	"fmt"
 	"os"
-	"sort"
 	"strconv"
 	"strings"
 	"text/tabwriter"
@@ -96,24 +95,31 @@ func printStates(s *rdb.Stats) {
 	tw.Flush()
 }
 
-func printQueues(queues map[string]int) {
-	var qnames, seps, counts []string
-	for q := range queues {
-		qnames = append(qnames, strings.Title(q))
-	}
-	sort.Strings(qnames) // sort for stable order
-	for _, q := range qnames {
-		seps = append(seps, strings.Repeat("-", len(q)))
-		counts = append(counts, strconv.Itoa(queues[strings.ToLower(q)]))
-	}
-	format := strings.Repeat("%v\t", len(qnames)) + "\n"
+func printQueues(queues []*rdb.Queue) {
+	var headers, seps, counts []string
+	for _, q := range queues {
+		title := queueTitle(q)
+		headers = append(headers, title)
+		seps = append(seps, strings.Repeat("-", len(title)))
+		counts = append(counts, strconv.Itoa(q.Size))
+	}
+	format := strings.Repeat("%v\t", len(headers)) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
-	fmt.Fprintf(tw, format, toInterfaceSlice(qnames)...)
+	fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)
 	fmt.Fprintf(tw, format, toInterfaceSlice(seps)...)
 	fmt.Fprintf(tw, format, toInterfaceSlice(counts)...)
 	tw.Flush()
 }
 
+func queueTitle(q *rdb.Queue) string {
+	var b strings.Builder
+	b.WriteString(strings.Title(q.Name))
+	if q.Paused {
+		b.WriteString(" (Paused)")
+	}
+	return b.String()
+}
+
 func printStats(s *rdb.Stats) {
 	format := strings.Repeat("%v\t", 3) + "\n"
 	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
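For context, the three Fprintf calls above emit a header row, a separator row, and a counts row through a single text/tabwriter writer so the columns stay aligned regardless of title length. Below is a small standalone sketch of that layout with sample data only; the toInterfaceSlice helper is reproduced here for completeness because its definition is not part of this diff, so treat it as an assumed equivalent:

    package main

    import (
    	"fmt"
    	"os"
    	"strings"
    	"text/tabwriter"
    )

    func main() {
    	headers := []string{"Default", "Email (Paused)", "Low"}
    	seps := []string{"-------", "--------------", "---"}
    	counts := []string{"12", "3", "0"}

    	// One "%v\t" cell per column, written three times: headers, separators, counts.
    	format := strings.Repeat("%v\t", len(headers)) + "\n"
    	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
    	fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)
    	fmt.Fprintf(tw, format, toInterfaceSlice(seps)...)
    	fmt.Fprintf(tw, format, toInterfaceSlice(counts)...)
    	tw.Flush()
    }

    // toInterfaceSlice adapts []string to fmt.Fprintf's variadic ...interface{} parameter.
    func toInterfaceSlice(strs []string) []interface{} {
    	res := make([]interface{}, len(strs))
    	for i, s := range strs {
    		res[i] = s
    	}
    	return res
    }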
tools/asynq/cmd/unpause.go (new file, 46 lines)

@@ -0,0 +1,46 @@
+// Copyright 2020 Kentaro Hibino. All rights reserved.
+// Use of this source code is governed by a MIT license
+// that can be found in the LICENSE file.
+
+package cmd
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/go-redis/redis/v7"
+	"github.com/hibiken/asynq/internal/rdb"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+)
+
+// unpauseCmd represents the unpause command
+var unpauseCmd = &cobra.Command{
+	Use:   "unpause [queue name]",
+	Short: "Unpauses the specified queue",
+	Long: `Unpause (asynq unpause) will unpause the specified queue.
+Asynq servers will process tasks from unpaused/resumed queues.
+
+Example: asynq unpause default -> Resume the "default" queue`,
+	Args: cobra.ExactValidArgs(1),
+	Run:  unpause,
+}
+
+func init() {
+	rootCmd.AddCommand(unpauseCmd)
+}
+
+func unpause(cmd *cobra.Command, args []string) {
+	c := redis.NewClient(&redis.Options{
+		Addr:     viper.GetString("uri"),
+		DB:       viper.GetInt("db"),
+		Password: viper.GetString("password"),
+	})
+	r := rdb.NewRDB(c)
+	err := r.Unpause(args[0])
+	if err != nil {
+		fmt.Printf("error: %v\n", err)
+		os.Exit(1)
+	}
+	fmt.Printf("Successfully resumed queue %q\n", args[0])
+}
tools/asynq/cmd/workers.go

@@ -61,7 +61,7 @@ func workers(cmd *cobra.Command, args []string) {
 		if x.Started != y.Started {
 			return x.Started.Before(y.Started)
 		}
-		return x.ID.String() < y.ID.String()
+		return x.ID < y.ID
 	})
 
 	cols := []string{"Process", "ID", "Type", "Payload", "Queue", "Started"}