// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

type processor struct {
	rdb *rdb.RDB

	handler Handler

	queueConfig map[string]uint

	retryDelayFunc retryDelayFunc

	// timeout for the blocking dequeue operation.
	// dequeue needs to time out to avoid blocking forever
	// in case of a program shutdown or addition of a new queue.
	dequeueTimeout time.Duration

	// sema is a counting semaphore to ensure the number of active workers
	// does not exceed the limit.
	sema chan struct{}

	// channel to communicate back to the long-running "processor" goroutine.
	// once is used to send a value to the channel only once.
	done chan struct{}
	once sync.Once

	// abort channel is closed when the shutdown of the "processor" goroutine starts.
	abort chan struct{}

	// quit channel communicates to the in-flight worker goroutines to stop.
	quit chan struct{}
}
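
// The sema field above is the classic buffered-channel counting-semaphore
// idiom. A minimal sketch of the pattern in isolation (illustrative only,
// not additional code in this package):
//
//	sema := make(chan struct{}, 3) // allow at most 3 concurrent workers
//	sema <- struct{}{}             // acquire a token; blocks when all 3 are taken
//	go func() {
//		defer func() { <-sema }() // release the token when the worker exits
//		// ... process one task ...
//	}()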

// retryDelayFunc computes the delay before retrying a task that failed with
// the given error; n is the number of times the task has been retried so far.
type retryDelayFunc func(n int, err error, task *Task) time.Duration
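
// An illustrative retryDelayFunc (a sketch for documentation; the name and
// constants here are assumptions, not part of the package): exponential
// backoff capped at one hour.
func exampleExponentialBackoff(n int, err error, task *Task) time.Duration {
	const maxDelay = time.Hour
	if n > 12 {
		// 2^12 seconds already exceeds the cap; also guards against overflow.
		return maxDelay
	}
	d := time.Duration(1<<uint(n)) * time.Second // 1s, 2s, 4s, 8s, ...
	if d > maxDelay {
		return maxDelay
	}
	return d
}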

func newProcessor(r *rdb.RDB, n int, qcfg map[string]uint, fn retryDelayFunc) *processor {
	return &processor{
		rdb:            r,
		queueConfig:    qcfg,
		retryDelayFunc: fn,
		dequeueTimeout: 2 * time.Second,
		sema:           make(chan struct{}, n),
		done:           make(chan struct{}),
		abort:          make(chan struct{}),
		quit:           make(chan struct{}),
		handler:        HandlerFunc(func(t *Task) error { return fmt.Errorf("handler not set") }),
	}
}
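
// Illustrative wiring (a sketch only; in the real package the processor is
// constructed and driven by other files in this repo, and the delay function
// above is a documentation example, not package API):
//
//	p := newProcessor(r, 10, map[string]uint{"default": 1}, exampleExponentialBackoff)
//	p.start()
//	// ...
//	p.terminate()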

// Note: stops only the "processor" goroutine; it does not stop the worker goroutines.
// It's safe to call this method multiple times.
func (p *processor) stop() {
	p.once.Do(func() {
		log.Println("[INFO] Processor shutting down...")
		// Unblock if the processor is waiting for a sema token.
		close(p.abort)
		// Signal the processor goroutine to stop processing tasks
		// from the queue.
		p.done <- struct{}{}
	})
}

// NOTE: once terminated, the processor cannot be restarted.
func (p *processor) terminate() {
	p.stop()

	// IDEA: Allow the user to customize this timeout value.
	const timeout = 8 * time.Second
	time.AfterFunc(timeout, func() { close(p.quit) })
	log.Println("[INFO] Waiting for all workers to finish...")
	// Block until all workers have released their tokens.
	for i := 0; i < cap(p.sema); i++ {
		p.sema <- struct{}{}
	}
	log.Println("[INFO] All workers have finished.")
	p.restore() // move any unfinished tasks back to the queue.
}
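
// Illustrative shutdown timeline for terminate above (a summary using the
// 8-second constant from this file; not additional API):
//
//	t=0s  p.stop()       // processor goroutine stops dequeuing new tasks
//	t=0s  AfterFunc arms close(p.quit) to fire at t=8s
//	t<8s  workers that finish naturally release their sema tokens
//	t=8s  close(p.quit) tells any still-running workers to stop
//	then  p.restore()    // unfinished tasks go back to the queue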

func (p *processor) start() {
	// NOTE: The call to "restore" needs to complete before starting
	// the processor goroutine.
	p.restore()
	go func() {
		for {
			select {
			case <-p.done:
				log.Println("[INFO] Processor done.")
				return
			default:
				p.exec()
			}
		}
	}()
}

// exec pulls a task out of the queue and starts a worker goroutine to
// process the task.
func (p *processor) exec() {
	// TODO(hibiken): Randomize the order to avoid starving low priority queues.
	var qnames []string
	for q := range p.queueConfig {
		qnames = append(qnames, q)
	}

	msg, err := p.rdb.Dequeue(qnames...)
	if err == rdb.ErrNoProcessableTask {
		// Queues are empty; this is normal behavior.
		// Sleep to avoid slamming Redis, and to let the scheduler move tasks into the queues.
		time.Sleep(time.Second)
		return
	}
	if err != nil {
		log.Printf("[ERROR] unexpected error while pulling a task out of queue: %v\n", err)
		return
	}

	select {
	case <-p.abort:
		// shutdown is starting, return immediately after requeuing the message.
		p.requeue(msg)
		return
	case p.sema <- struct{}{}: // acquire token
		go func() {
			defer func() { <-p.sema /* release token */ }()

			resCh := make(chan error, 1)
			task := NewTask(msg.Type, msg.Payload)
			go func() {
				resCh <- perform(p.handler, task)
			}()

			select {
			case <-p.quit:
				// time is up, quit this worker goroutine.
				log.Printf("[WARN] Terminating in-progress task %+v\n", msg)
				return
			case resErr := <-resCh:
				// Note: One of three things should happen.
				// 1) Done  -> Removes the message from InProgress
				// 2) Retry -> Removes the message from InProgress & Adds the message to Retry
				// 3) Kill  -> Removes the message from InProgress & Adds the message to Dead
				if resErr != nil {
					if msg.Retried >= msg.Retry {
						p.kill(msg, resErr)
					} else {
						p.retry(msg, resErr)
					}
					return
				}
				p.markAsDone(msg)
			}
		}()
	}
}

// restore moves all unfinished tasks from the "in-progress" list
// back to the queue so they can be processed again.
func (p *processor) restore() {
	n, err := p.rdb.RestoreUnfinished()
	if err != nil {
		log.Printf("[ERROR] Could not restore unfinished tasks: %v\n", err)
	}
	if n > 0 {
		log.Printf("[INFO] Restored %d unfinished tasks back to queue.\n", n)
	}
}

func (p *processor) requeue(msg *base.TaskMessage) {
	err := p.rdb.Requeue(msg)
	if err != nil {
		log.Printf("[ERROR] Could not move task from InProgress back to queue: %v\n", err)
	}
}

func (p *processor) markAsDone(msg *base.TaskMessage) {
	err := p.rdb.Done(msg)
	if err != nil {
		log.Printf("[ERROR] Could not remove task from InProgress queue: %v\n", err)
	}
}

func (p *processor) retry(msg *base.TaskMessage, e error) {
	d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
	retryAt := time.Now().Add(d)
	err := p.rdb.Retry(msg, retryAt, e.Error())
	if err != nil {
		log.Printf("[ERROR] Could not send task %+v to Retry queue: %v\n", msg, err)
	}
}

func (p *processor) kill(msg *base.TaskMessage, e error) {
	log.Printf("[WARN] Retry exhausted for task(Type: %q, ID: %v)\n", msg.Type, msg.ID)
	err := p.rdb.Kill(msg, e.Error())
	if err != nil {
		log.Printf("[ERROR] Could not send task %+v to Dead queue: %v\n", msg, err)
	}
}

// perform calls the handler with the given task.
// If the call returns without panicking, it simply returns the handler's error value;
// otherwise, it recovers from the panic and returns it as an error.
func perform(h Handler, task *Task) (err error) {
	defer func() {
		if x := recover(); x != nil {
			err = fmt.Errorf("panic: %v", x)
		}
	}()
	return h.ProcessTask(task)
}
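
// Illustrative use of perform (a sketch for documentation, not code in this
// package): a panicking handler surfaces as a regular error, so the task is
// retried or killed like any other failure.
//
//	h := HandlerFunc(func(t *Task) error { panic("boom") })
//	err := perform(h, NewTask("example", nil))
//	// err != nil && err.Error() == "panic: boom"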