Mirror of https://github.com/hibiken/asynq.git (synced 2024-12-25 07:12:17 +08:00)
Add test for processor
commit d53e5d3350
parent eed375b138
processor.go (22 changes)
@@ -11,8 +11,13 @@ type processor struct {
 	handler TaskHandler
 
+	// timeout for blocking dequeue operation.
+	// dequeue needs to timeout to avoid blocking forever
+	// in case of a program shutdown or additon of a new queue.
+	dequeueTimeout time.Duration
+
 	// sema is a counting semaphore to ensure the number of active workers
-	// does not exceed the limit
+	// does not exceed the limit.
 	sema chan struct{}
 
 	// channel to communicate back to the long running "processor" goroutine.
@@ -21,13 +26,15 @@ type processor struct {
 func newProcessor(rdb *rdb, numWorkers int, handler TaskHandler) *processor {
 	return &processor{
 		rdb:     rdb,
 		handler: handler,
-		sema:    make(chan struct{}, numWorkers),
-		done:    make(chan struct{}),
+		dequeueTimeout: 5 * time.Second,
+		sema:           make(chan struct{}, numWorkers),
+		done:           make(chan struct{}),
 	}
 }
 
+// NOTE: once terminated, processor cannot be re-started.
 func (p *processor) terminate() {
 	log.Println("[INFO] Processor shutting down...")
 	// Signal the processor goroutine to stop processing tasks from the queue.
@@ -61,10 +68,7 @@ func (p *processor) start() {
 // exec pulls a task out of the queue and starts a worker goroutine to
 // process the task.
 func (p *processor) exec() {
-	// NOTE: dequeue needs to timeout to avoid blocking forever
-	// in case of a program shutdown or additon of a new queue.
-	const timeout = 5 * time.Second
-	msg, err := p.rdb.dequeue(defaultQueue, timeout)
+	msg, err := p.rdb.dequeue(defaultQueue, p.dequeueTimeout)
 	if err == errDequeueTimeout {
 		// timed out, this is a normal behavior.
 		return
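The comment moved out of exec() records the constraint this commit turns into a field: a blocking dequeue must give up after dequeueTimeout, or the processor goroutine would never observe a shutdown signal (a Redis blocking pop cannot be selected on the way a Go channel can, so the timeout is the only way back to the loop's done check). Below is a minimal, self-contained sketch of that loop shape; it is not part of this commit, and a plain channel stands in for the Redis queue with illustrative names throughout.

package main

import (
	"fmt"
	"time"
)

// loop mimics the processor's shape: each dequeue attempt blocks for at
// most dequeueTimeout, so the goroutine re-checks done at least that often.
func loop(queue <-chan string, done <-chan struct{}, dequeueTimeout time.Duration) {
	for {
		select {
		case <-done:
			return
		case msg := <-queue:
			fmt.Println("processing:", msg)
		case <-time.After(dequeueTimeout):
			// analogous to errDequeueTimeout: a normal, silent retry
		}
	}
}

func main() {
	queue := make(chan string, 1)
	done := make(chan struct{})
	go loop(queue, done, 100*time.Millisecond)

	queue <- "send_email"
	time.Sleep(150 * time.Millisecond)
	close(done) // shutdown is observed within one dequeueTimeout
	time.Sleep(150 * time.Millisecond)
}

This is presumably also why the tests below override dequeueTimeout down to one second: terminate() cannot return faster than the worst-case remaining block on dequeue.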
@@ -2,9 +2,179 @@ package asynq
 import (
 	"fmt"
+	"sync"
 	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
 )
 
+func TestProcessorSuccess(t *testing.T) {
+	r := setup(t)
+
+	m1 := randomTask("send_email", "default", nil)
+	m2 := randomTask("gen_thumbnail", "default", nil)
+	m3 := randomTask("reindex", "default", nil)
+	m4 := randomTask("sync", "default", nil)
+
+	t1 := &Task{Type: m1.Type, Payload: m1.Payload}
+	t2 := &Task{Type: m2.Type, Payload: m2.Payload}
+	t3 := &Task{Type: m3.Type, Payload: m3.Payload}
+	t4 := &Task{Type: m4.Type, Payload: m4.Payload}
+
+	tests := []struct {
+		initQueue     []*taskMessage // initial default queue state
+		incoming      []*taskMessage // tasks to be enqueued during run
+		wait          time.Duration  // wait duration between starting and stopping processor for this test case
+		wantProcessed []*Task        // tasks to be processed at the end
+	}{
+		{
+			initQueue:     []*taskMessage{m1},
+			incoming:      []*taskMessage{m2, m3, m4},
+			wait:          time.Second,
+			wantProcessed: []*Task{t1, t2, t3, t4},
+		},
+		{
+			initQueue:     []*taskMessage{},
+			incoming:      []*taskMessage{m1},
+			wait:          time.Second,
+			wantProcessed: []*Task{t1},
+		},
+	}
+
+	for _, tc := range tests {
+		// clean up db before each test case.
+		if err := r.client.FlushDB().Err(); err != nil {
+			t.Fatal(err)
+		}
+		// instantiate a new processor
+		var mu sync.Mutex
+		var processed []*Task
+		h := func(task *Task) error {
+			mu.Lock()
+			defer mu.Unlock()
+			processed = append(processed, task)
+			return nil
+		}
+		p := newProcessor(r, 10, h)
+		p.dequeueTimeout = time.Second // short time out for test purpose
+		// initialize default queue.
+		for _, msg := range tc.initQueue {
+			err := r.enqueue(msg)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		p.start()
+
+		for _, msg := range tc.incoming {
+			err := r.enqueue(msg)
+			if err != nil {
+				p.terminate()
+				t.Fatal(err)
+			}
+		}
+		time.Sleep(tc.wait)
+		p.terminate()
+
+		if diff := cmp.Diff(tc.wantProcessed, processed, sortTaskOpt); diff != "" {
+			t.Errorf("mismatch found in processed tasks; (-want, +got)\n%s", diff)
+		}
+
+		if l := r.client.LLen(inProgress).Val(); l != 0 {
+			t.Errorf("%q has %d tasks, want 0", inProgress, l)
+		}
+	}
+}
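TestProcessorSuccess verifies concurrent processing by handing the processor a closure that appends each handled task to a shared slice under a mutex. The same recording-handler pattern as a standalone sketch; this is not from the repo, the Task type here is a stand-in, and a value like rec.handle could equally be passed to newProcessor.

package main

import (
	"fmt"
	"sync"
)

// Task stands in for the package's Task type in this sketch.
type Task struct{ Type string }

// recorder is a handler that safely collects tasks processed by
// concurrent workers; every append is serialized by the mutex.
type recorder struct {
	mu    sync.Mutex
	tasks []*Task
}

func (r *recorder) handle(t *Task) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.tasks = append(r.tasks, t)
	return nil
}

func main() {
	rec := &recorder{}
	var wg sync.WaitGroup
	for _, typ := range []string{"send_email", "reindex", "sync"} {
		wg.Add(1)
		go func(typ string) { // simulate concurrent workers
			defer wg.Done()
			rec.handle(&Task{Type: typ})
		}(typ)
	}
	wg.Wait()
	fmt.Println(len(rec.tasks)) // 3
}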
+
+func TestProcessorRetry(t *testing.T) {
+	r := setup(t)
+
+	m1 := randomTask("send_email", "default", nil)
+	m1.Retried = m1.Retry // m1 has reached its max retry count
+	m2 := randomTask("gen_thumbnail", "default", nil)
+	m3 := randomTask("reindex", "default", nil)
+	m4 := randomTask("sync", "default", nil)
+
+	errMsg := "something went wrong"
+	// r* is m* after retry
+	r1 := *m1
+	r1.ErrorMsg = errMsg
+	r2 := *m2
+	r2.ErrorMsg = errMsg
+	r2.Retried = m2.Retried + 1
+	r3 := *m3
+	r3.ErrorMsg = errMsg
+	r3.Retried = m3.Retried + 1
+	r4 := *m4
+	r4.ErrorMsg = errMsg
+	r4.Retried = m4.Retried + 1
+
+	tests := []struct {
+		initQueue []*taskMessage // initial default queue state
+		incoming  []*taskMessage // tasks to be enqueued during run
+		wait      time.Duration  // wait duration between starting and stopping processor for this test case
+		wantRetry []*taskMessage // tasks in retry queue at the end
+		wantDead  []*taskMessage // tasks in dead queue at the end
+	}{
+		{
+			initQueue: []*taskMessage{m1, m2},
+			incoming:  []*taskMessage{m3, m4},
+			wait:      time.Second,
+			wantRetry: []*taskMessage{&r2, &r3, &r4},
+			wantDead:  []*taskMessage{&r1},
+		},
+	}
+
+	for _, tc := range tests {
+		// clean up db before each test case.
+		if err := r.client.FlushDB().Err(); err != nil {
+			t.Fatal(err)
+		}
+		// instantiate a new processor
+		h := func(task *Task) error {
+			return fmt.Errorf(errMsg)
+		}
+		p := newProcessor(r, 10, h)
+		p.dequeueTimeout = time.Second // short time out for test purpose
+		// initialize default queue.
+		for _, msg := range tc.initQueue {
+			err := r.enqueue(msg)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		p.start()
+		for _, msg := range tc.incoming {
+			err := r.enqueue(msg)
+			if err != nil {
+				p.terminate()
+				t.Fatal(err)
+			}
+		}
+		time.Sleep(tc.wait)
+		p.terminate()
+
+		gotRetryRaw := r.client.ZRange(retry, 0, -1).Val()
+		gotRetry := mustUnmarshalSlice(t, gotRetryRaw)
+		if diff := cmp.Diff(tc.wantRetry, gotRetry, sortMsgOpt); diff != "" {
+			t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", retry, diff)
+		}
+
+		gotDeadRaw := r.client.ZRange(dead, 0, -1).Val()
+		gotDead := mustUnmarshalSlice(t, gotDeadRaw)
+		if diff := cmp.Diff(tc.wantDead, gotDead, sortMsgOpt); diff != "" {
+			t.Errorf("mismatch found in %q after running processor; (-want, +got)\n%s", dead, diff)
+		}
+
+		if l := r.client.LLen(inProgress).Val(); l != 0 {
+			t.Errorf("%q has %d tasks, want 0", inProgress, l)
+		}
+	}
+}
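The r1..r4 values pin down the retry policy the processor is expected to implement: a failed task records the error message; if Retried has already reached Retry (m1's case), it lands in the dead queue with no further increment; otherwise Retried is incremented and it lands in the retry queue. A sketch of that decision, inferred only from the test's expected values rather than from the implementation; field values in main are arbitrary.

package main

import "fmt"

// taskMessage carries only the fields visible in this diff.
type taskMessage struct {
	Type     string
	Retry    int
	Retried  int
	ErrorMsg string
}

// routeFailure mirrors the state transitions the test asserts for a
// failed task: record the error; if the retry budget is spent the task
// is dead, otherwise bump Retried and schedule a retry.
func routeFailure(msg *taskMessage, errMsg string) string {
	msg.ErrorMsg = errMsg
	if msg.Retried >= msg.Retry {
		return "dead" // m1's case: Retried was set equal to Retry
	}
	msg.Retried++
	return "retry" // m2, m3, m4's case: Retried is incremented
}

func main() {
	exhausted := &taskMessage{Type: "send_email", Retry: 25, Retried: 25} // like m1
	fresh := &taskMessage{Type: "reindex", Retry: 25, Retried: 0}         // like m3
	fmt.Println(routeFailure(exhausted, "something went wrong")) // dead
	fmt.Println(routeFailure(fresh, "something went wrong"))     // retry
}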
 
 func TestPerform(t *testing.T) {
 	tests := []struct {
 		desc string
@@ -15,7 +185,6 @@ func TestPerform(t *testing.T) {
 		{
 			desc: "handler returns nil",
 			handler: func(t *Task) error {
-				fmt.Println("processing...")
 				return nil
 			},
 			task: &Task{Type: "gen_thumbnail", Payload: map[string]interface{}{"src": "some/img/path"}},
@@ -24,7 +193,6 @@ func TestPerform(t *testing.T) {
 		{
 			desc: "handler returns error",
 			handler: func(t *Task) error {
-				fmt.Println("processing...")
 				return fmt.Errorf("something went wrong")
 			},
 			task: &Task{Type: "gen_thumbnail", Payload: map[string]interface{}{"src": "some/img/path"}},
@@ -33,7 +201,6 @@ func TestPerform(t *testing.T) {
 		{
 			desc: "handler panics",
 			handler: func(t *Task) error {
-				fmt.Println("processing...")
 				panic("something went terribly wrong")
 			},
 			task: &Task{Type: "gen_thumbnail", Payload: map[string]interface{}{"src": "some/img/path"}},
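The "handler panics" case in TestPerform implies that perform recovers handler panics and surfaces them as ordinary errors instead of crashing the worker. perform itself is not shown in this diff, so the following is a minimal sketch of that assumed behavior, with stand-in Task and TaskHandler types.

package main

import "fmt"

type Task struct{ Type string }
type TaskHandler func(*Task) error

// perform runs a handler and converts a panic into an ordinary error,
// so one misbehaving task handler cannot take down the whole worker pool.
func perform(h TaskHandler, task *Task) (err error) {
	defer func() {
		if x := recover(); x != nil {
			err = fmt.Errorf("task handler panic: %v", x)
		}
	}()
	return h(task)
}

func main() {
	h := func(t *Task) error { panic("something went terribly wrong") }
	fmt.Println(perform(h, &Task{Type: "gen_thumbnail"})) // task handler panic: ...
}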
@@ -25,6 +25,14 @@ var sortMsgOpt = cmp.Transformer("SortMsg", func(in []*taskMessage) []*taskMessage {
 	return out
 })
 
+var sortTaskOpt = cmp.Transformer("SortMsg", func(in []*Task) []*Task {
+	out := append([]*Task(nil), in...) // Copy input to avoid mutating it
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].Type < out[j].Type
+	})
+	return out
+})
 
 // setup connects to a redis database and flush all keys
 // before returning an instance of rdb.
 func setup(t *testing.T) *rdb {
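sortTaskOpt follows go-cmp's standard recipe for order-insensitive slice comparison: a Transformer that sorts a copy of each slice before diffing (the copy matters, since a transformer must not mutate its input). A self-contained example of the same recipe, with an illustrative Item type.

package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

type Item struct{ Type string }

func main() {
	// sortItems makes slice comparisons ignore element order.
	sortItems := cmp.Transformer("SortItems", func(in []Item) []Item {
		out := append([]Item(nil), in...) // copy to avoid mutating input
		sort.Slice(out, func(i, j int) bool { return out[i].Type < out[j].Type })
		return out
	})
	a := []Item{{"reindex"}, {"send_email"}}
	b := []Item{{"send_email"}, {"reindex"}}
	fmt.Println(cmp.Equal(a, b, sortItems)) // true: order is ignored
}

Note that the new transformer reuses the name "SortMsg" from sortMsgOpt above; go-cmp accepts duplicate transformer names, though a distinct name such as "SortTask" would arguably make failure output easier to read.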