asynq/background.go

// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"fmt"
	"log"
	"math"
	"math/rand"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

// Background is responsible for managing the background-task processing.
//
// Background manages task queues to process tasks.
// If the processing of a task is unsuccessful, Background will
// schedule it for a retry until either the task is processed successfully
// or it exhausts its max retry count.
//
// Once a task exhausts its retries, it will be moved to the "dead" queue and
// will be kept in the queue for some time until a certain condition is met
// (e.g., queue size reaches a certain limit, or the task has been in the
// queue for a certain amount of time).
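//
// A minimal usage sketch, assuming the RedisClientOpt connection option; the
// address, concurrency, and handler below are illustrative choices, not
// defaults of this package:
//
//     bg := asynq.NewBackground(asynq.RedisClientOpt{Addr: "localhost:6379"}, &asynq.Config{
//         Concurrency: 10,
//     })
//     bg.Run(asynq.HandlerFunc(func(t *asynq.Task) error {
//         // ... process the task ...
//         return nil
//     }))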
type Background struct {
	mu      sync.Mutex
	running bool

	rdb       *rdb.RDB
	scheduler *scheduler
	processor *processor
	syncer    *syncer
}

// Config specifies the background-task processing behavior.
type Config struct {
	// Maximum number of tasks to process concurrently.
	//
	// If set to a zero or negative value, NewBackground will overwrite it with one.
	Concurrency int

	// Function to calculate the retry delay for a failed task.
	//
	// By default, it uses an exponential backoff algorithm to calculate the delay.
	//
	// n is the number of times the task has been retried.
	// e is the error returned by the task handler.
	// t is the task in question.
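	//
	// For example, a fixed ten-second delay for every retry (a sketch, not this
	// package's default backoff) could be supplied as:
	//
	//     RetryDelayFunc: func(n int, e error, t *Task) time.Duration {
	//         return 10 * time.Second
	//     },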
	RetryDelayFunc func(n int, e error, t *Task) time.Duration

	// List of queues to process with the given priority level. Keys are the names
	// of the queues and values are the associated priority levels.
	//
	// If set to nil or not specified, the background will process only the "default" queue.
	//
	// Priority is treated as follows to avoid starving low-priority queues.
	//
	// Example:
	//
	//     Queues: map[string]uint{
	//         "critical": 6,
	//         "default":  3,
	//         "low":      1,
	//     }
	//
	// With the above config, and given that all queues are non-empty, the tasks
	// in "critical", "default", and "low" should be processed 60%, 30%, and 10% of
	// the time respectively.
	Queues map[string]uint

	// StrictPriority indicates whether the queue priority should be treated strictly.
	//
	// If set to true, tasks in the queue with the highest priority are processed first.
	// Tasks in lower-priority queues are processed only when the queues with
	// higher priorities are empty.
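	//
	// For example, with the illustrative config below (not a default of this package),
	// tasks in "critical" are always processed before those in "default", and tasks in
	// "default" before those in "low":
	//
	//     Queues: map[string]uint{
	//         "critical": 3,
	//         "default":  2,
	//         "low":      1,
	//     },
	//     StrictPriority: true,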
	StrictPriority bool
}

// Formula taken from https://github.com/mperham/sidekiq.
func defaultDelayFunc(n int, e error, t *Task) time.Duration {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Backoff with jitter: n^4 + 15 + rand(0..29) * (n + 1) seconds.
	s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
	return time.Duration(s) * time.Second
}

var defaultQueueConfig = map[string]uint{
	base.DefaultQueueName: 1,
}

// NewBackground returns a new Background given a redis connection option
// and background processing configuration.
func NewBackground(r RedisConnOpt, cfg *Config) *Background {
	n := cfg.Concurrency
	if n < 1 {
		n = 1
	}
	delayFunc := cfg.RetryDelayFunc
	if delayFunc == nil {
		delayFunc = defaultDelayFunc
	}
	queues := cfg.Queues
	if len(queues) == 0 {
		queues = defaultQueueConfig
	}
	qcfg := normalizeQueueCfg(queues)

	syncRequestCh := make(chan *syncRequest)
	syncer := newSyncer(syncRequestCh, 5*time.Second)
	rdb := rdb.NewRDB(createRedisClient(r))
	scheduler := newScheduler(rdb, 5*time.Second, qcfg)
	processor := newProcessor(rdb, n, qcfg, cfg.StrictPriority, delayFunc, syncRequestCh)
	return &Background{
		rdb:       rdb,
		scheduler: scheduler,
		processor: processor,
		syncer:    syncer,
	}
}

// A Handler processes a task.
//
// ProcessTask should return nil if the processing of a task
// is successful.
//
// If ProcessTask returns a non-nil error or panics, the task
// will be retried after a delay.
type Handler interface {
	ProcessTask(*Task) error
}

// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as a Handler. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
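//
// For example (processTask here is a hypothetical function with the signature
// func(*Task) error, not something defined in this package):
//
//     var h Handler = HandlerFunc(processTask)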
type HandlerFunc func(*Task) error

// ProcessTask calls fn(task).
func (fn HandlerFunc) ProcessTask(task *Task) error {
	return fn(task)
}

// Run starts the background-task processing and blocks until
// an OS signal to exit the program is received. Once it receives
// a signal, it gracefully shuts down all active workers and other
// goroutines that process the tasks.
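//
// In addition to SIGTERM and SIGINT, Run listens for SIGTSTP: on receiving
// SIGTSTP the processor stops pulling new tasks off the queues, and a
// subsequent SIGTERM or SIGINT then triggers the graceful shutdown.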
func (bg *Background) Run(handler Handler) {
	bg.start(handler)
	defer bg.stop()

	// Wait for a signal to terminate.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT, syscall.SIGTSTP)
	for {
		sig := <-sigs
		if sig == syscall.SIGTSTP {
			// SIGTSTP pauses the processor; keep waiting for a terminating signal.
			bg.processor.stop()
			continue
		}
		break
	}
	fmt.Println()
	log.Println("[INFO] Starting graceful shutdown...")
}

// starts the background-task processing.
func (bg *Background) start(handler Handler) {
	bg.mu.Lock()
	defer bg.mu.Unlock()
	if bg.running {
		return
	}

	bg.running = true
	bg.processor.handler = handler

	bg.syncer.start()
	bg.scheduler.start()
	bg.processor.start()
}

// stops the background-task processing.
func (bg *Background) stop() {
	bg.mu.Lock()
	defer bg.mu.Unlock()
	if !bg.running {
		return
	}

	bg.scheduler.terminate()
	bg.processor.terminate()
	// Note: the processor and all worker goroutines need to have exited
	// before the syncer is shut down, to avoid a goroutine leak.
	bg.syncer.terminate()

	bg.rdb.Close()
	bg.processor.handler = nil
	bg.running = false
}

// normalizeQueueCfg divides priority numbers by their
// greatest common divisor.
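//
// For example, {"critical": 6, "default": 4, "low": 2} is normalized to
// {"critical": 3, "default": 2, "low": 1}.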
func normalizeQueueCfg(queueCfg map[string]uint) map[string]uint {
	var xs []uint
	for _, x := range queueCfg {
		xs = append(xs, x)
	}
	d := gcd(xs...)
	res := make(map[string]uint)
	for q, x := range queueCfg {
		res[q] = x / d
	}
	return res
}

func gcd(xs ...uint) uint {
	fn := func(x, y uint) uint {
		for y > 0 {
			x, y = y, x%y
		}
		return x
	}
	res := xs[0]
	for i := 0; i < len(xs); i++ {
		res = fn(xs[i], res)
		if res == 1 {
			return 1
		}
	}
	return res
}