// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package asynq

import (
	"context"
	"errors"
	"fmt"
	"math"
	"math/rand"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/log"
	"github.com/hibiken/asynq/internal/rdb"
)

// Server is responsible for managing the background-task processing.
//
// Server pulls tasks off queues and processes them.
// If the processing of a task is unsuccessful, the server will
// schedule it for a retry.
// A task will be retried until either the task gets processed successfully
// or until it reaches its max retry count.
//
// If a task exhausts its retries, it will be moved to the "dead" queue and
// will be kept in the queue for some time until a certain condition is met
// (e.g., queue size reaches a certain limit, or the task has been in the
// queue for a certain amount of time).
type Server struct {
	logger *log.Logger

	broker base.Broker

	status *base.ServerStatus

	// wait group to wait for all goroutines to finish.
	wg sync.WaitGroup

	scheduler   *scheduler
	processor   *processor
	syncer      *syncer
	heartbeater *heartbeater
	subscriber  *subscriber
	recoverer   *recoverer
}

// Config specifies the server's background-task processing behavior.
type Config struct {
	// Maximum number of tasks to process concurrently.
	//
	// If set to a zero or negative value, NewServer will overwrite the value
	// with the number of CPUs usable by the current process.
	Concurrency int

	// Function to calculate retry delay for a failed task.
	//
	// By default, it uses an exponential backoff algorithm to calculate the delay.
	//
	// n is the number of times the task has been retried.
	// e is the error returned by the task handler.
	// t is the task in question.
	RetryDelayFunc func(n int, e error, t *Task) time.Duration

	// List of queues to process with given priority value. Keys are the names of the
	// queues and values are the associated priority values.
	//
	// If set to nil or not specified, the server will process only the "default" queue.
	//
	// Priority is treated as follows to avoid starving low priority queues.
	//
	// Example:
	//
	//     Queues: map[string]int{
	//         "critical": 6,
	//         "default":  3,
	//         "low":      1,
	//     }
	//
	// With the above config and given that all queues are not empty, the tasks
	// in "critical", "default", "low" should be processed 60%, 30%, 10% of
	// the time respectively.
	//
	// If a queue has a zero or negative priority value, the queue will be ignored.
	Queues map[string]int

	// StrictPriority indicates whether the queue priority should be treated strictly.
	//
	// If set to true, tasks in the queue with the highest priority are processed first.
	// The tasks in lower priority queues are processed only when those queues with
	// higher priorities are empty.
	StrictPriority bool

	// ErrorHandler handles errors returned by the task handler.
	//
	// HandleError is invoked only if the task handler returns a non-nil error.
	//
	// Example:
	//
	//     func reportError(task *asynq.Task, err error, retried, maxRetry int) {
	//         if retried >= maxRetry {
	//             err = fmt.Errorf("retry exhausted for task %s: %w", task.Type, err)
	//         }
	//         errorReportingService.Notify(err)
	//     }
	//
	//     ErrorHandler: asynq.ErrorHandlerFunc(reportError)
	ErrorHandler ErrorHandler

	// Logger specifies the logger used by the server instance.
	//
	// If unset, the default logger is used.
	Logger Logger

	// LogLevel specifies the minimum log level to enable.
	//
	// If unset, InfoLevel is used by default.
	LogLevel LogLevel

	// ShutdownTimeout specifies the duration to wait to let workers finish their tasks
	// before forcing them to abort when stopping the server.
	//
	// If unset or zero, a default timeout of 8 seconds is used.
	ShutdownTimeout time.Duration
}
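
// An example Config, shown as a sketch of how the fields above fit together.
// The queue names and numeric values are illustrative, not defaults:
//
//     asynq.Config{
//         Concurrency: 20,
//         Queues: map[string]int{
//             "critical": 6,
//             "default":  3,
//             "low":      1,
//         },
//         StrictPriority:  false,
//         ShutdownTimeout: 10 * time.Second,
//     }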

// An ErrorHandler handles errors returned by the task handler.
type ErrorHandler interface {
	HandleError(task *Task, err error, retried, maxRetry int)
}

// The ErrorHandlerFunc type is an adapter to allow the use of ordinary functions as an ErrorHandler.
// If f is a function with the appropriate signature, ErrorHandlerFunc(f) is an ErrorHandler that calls f.
type ErrorHandlerFunc func(task *Task, err error, retried, maxRetry int)

// HandleError calls fn(task, err, retried, maxRetry).
func (fn ErrorHandlerFunc) HandleError(task *Task, err error, retried, maxRetry int) {
	fn(task, err, retried, maxRetry)
}

// Logger supports logging at various log levels.
type Logger interface {
	// Debug logs a message at Debug level.
	Debug(args ...interface{})

	// Info logs a message at Info level.
	Info(args ...interface{})

	// Warn logs a message at Warning level.
	Warn(args ...interface{})

	// Error logs a message at Error level.
	Error(args ...interface{})

	// Fatal logs a message at Fatal level
	// and the process will exit with status set to 1.
	Fatal(args ...interface{})
}
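
// As a sketch, any logging library can satisfy this interface with a thin
// adapter. The example below wraps the standard library's log.Logger;
// the stdLogger type and the level prefixes are hypothetical, not part of
// this package:
//
//     type stdLogger struct{ l *log.Logger }
//
//     func (s stdLogger) Debug(args ...interface{}) { s.l.Println(append([]interface{}{"DEBUG:"}, args...)...) }
//     func (s stdLogger) Info(args ...interface{})  { s.l.Println(append([]interface{}{"INFO:"}, args...)...) }
//     func (s stdLogger) Warn(args ...interface{})  { s.l.Println(append([]interface{}{"WARN:"}, args...)...) }
//     func (s stdLogger) Error(args ...interface{}) { s.l.Println(append([]interface{}{"ERROR:"}, args...)...) }
//     func (s stdLogger) Fatal(args ...interface{}) { s.l.Fatalln(append([]interface{}{"FATAL:"}, args...)...) }
//
//     // Pass it via Config{Logger: stdLogger{log.New(os.Stderr, "", log.LstdFlags)}}.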

// LogLevel represents a logging level.
//
// It satisfies the flag.Value interface.
type LogLevel int32

const (
	// Note: reserving value zero to differentiate unspecified case.
	level_unspecified LogLevel = iota

	// DebugLevel is the lowest level of logging.
	// Debug logs are intended for debugging and development purposes.
	DebugLevel

	// InfoLevel is used for general informational log messages.
	InfoLevel

	// WarnLevel is used for undesired but relatively expected events,
	// which may indicate a problem.
	WarnLevel

	// ErrorLevel is used for undesired and unexpected events that
	// the program can recover from.
	ErrorLevel

	// FatalLevel is used for undesired and unexpected events that
	// the program cannot recover from.
	FatalLevel
)

// String is part of the flag.Value interface.
func (l *LogLevel) String() string {
	switch *l {
	case DebugLevel:
		return "debug"
	case InfoLevel:
		return "info"
	case WarnLevel:
		return "warn"
	case ErrorLevel:
		return "error"
	case FatalLevel:
		return "fatal"
	}
	panic(fmt.Sprintf("asynq: unexpected log level: %v", *l))
}

// Set is part of the flag.Value interface.
func (l *LogLevel) Set(val string) error {
	switch strings.ToLower(val) {
	case "debug":
		*l = DebugLevel
	case "info":
		*l = InfoLevel
	case "warn", "warning":
		*l = WarnLevel
	case "error":
		*l = ErrorLevel
	case "fatal":
		*l = FatalLevel
	default:
		return fmt.Errorf("asynq: unsupported log level %q", val)
	}
	return nil
}
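
// Because *LogLevel implements flag.Value, it can be bound directly to a
// command-line flag. A minimal sketch; the flag name and the redisOpt
// connection option are illustrative:
//
//     var level asynq.LogLevel
//     flag.Var(&level, "loglevel", "minimum log level: debug, info, warn, error, or fatal")
//     flag.Parse()
//
//     srv := asynq.NewServer(redisOpt, asynq.Config{LogLevel: level})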

func toInternalLogLevel(l LogLevel) log.Level {
	switch l {
	case DebugLevel:
		return log.DebugLevel
	case InfoLevel:
		return log.InfoLevel
	case WarnLevel:
		return log.WarnLevel
	case ErrorLevel:
		return log.ErrorLevel
	case FatalLevel:
		return log.FatalLevel
	}
	panic(fmt.Sprintf("asynq: unexpected log level: %v", l))
}

// Formula taken from https://github.com/mperham/sidekiq.
func defaultDelayFunc(n int, e error, t *Task) time.Duration {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
	return time.Duration(s) * time.Second
}
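
// Worked example of the default backoff above: the delay is
// n^4 + 15 + rand(0..29)*(n+1) seconds, so for n=0 the delay falls in
// roughly 15-44s, for n=1 in 16-74s, for n=2 in 31-118s, and by n=5 it has
// grown to roughly 10-14 minutes.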

var defaultQueueConfig = map[string]int{
	base.DefaultQueueName: 1,
}

const defaultShutdownTimeout = 8 * time.Second

// NewServer returns a new Server given a redis connection option
// and background processing configuration.
func NewServer(r RedisConnOpt, cfg Config) *Server {
	n := cfg.Concurrency
	if n < 1 {
		n = runtime.NumCPU()
	}
	delayFunc := cfg.RetryDelayFunc
	if delayFunc == nil {
		delayFunc = defaultDelayFunc
	}
	queues := make(map[string]int)
	for qname, p := range cfg.Queues {
		if p > 0 {
			queues[qname] = p
		}
	}
	if len(queues) == 0 {
		queues = defaultQueueConfig
	}
	shutdownTimeout := cfg.ShutdownTimeout
	if shutdownTimeout == 0 {
		shutdownTimeout = defaultShutdownTimeout
	}
	logger := log.NewLogger(cfg.Logger)
	loglevel := cfg.LogLevel
	if loglevel == level_unspecified {
		loglevel = InfoLevel
	}
	logger.SetLevel(toInternalLogLevel(loglevel))

	rdb := rdb.NewRDB(createRedisClient(r))
	starting := make(chan *base.TaskMessage)
	finished := make(chan *base.TaskMessage)
	syncCh := make(chan *syncRequest)
	status := base.NewServerStatus(base.StatusIdle)
	cancels := base.NewCancelations()

	syncer := newSyncer(syncerParams{
		logger:     logger,
		requestsCh: syncCh,
		interval:   5 * time.Second,
	})
	heartbeater := newHeartbeater(heartbeaterParams{
		logger:         logger,
		broker:         rdb,
		interval:       5 * time.Second,
		concurrency:    n,
		queues:         queues,
		strictPriority: cfg.StrictPriority,
		status:         status,
		starting:       starting,
		finished:       finished,
	})
	scheduler := newScheduler(schedulerParams{
		logger:   logger,
		broker:   rdb,
		interval: 5 * time.Second,
	})
	subscriber := newSubscriber(subscriberParams{
		logger:       logger,
		broker:       rdb,
		cancelations: cancels,
	})
	processor := newProcessor(processorParams{
		logger:          logger,
		broker:          rdb,
		retryDelayFunc:  delayFunc,
		syncCh:          syncCh,
		cancelations:    cancels,
		concurrency:     n,
		queues:          queues,
		strictPriority:  cfg.StrictPriority,
		errHandler:      cfg.ErrorHandler,
		shutdownTimeout: shutdownTimeout,
		starting:        starting,
		finished:        finished,
	})
	recoverer := newRecoverer(recovererParams{
		logger:         logger,
		broker:         rdb,
		retryDelayFunc: delayFunc,
		interval:       1 * time.Minute,
	})
	return &Server{
		logger:      logger,
		broker:      rdb,
		status:      status,
		scheduler:   scheduler,
		processor:   processor,
		syncer:      syncer,
		heartbeater: heartbeater,
		subscriber:  subscriber,
		recoverer:   recoverer,
	}
}
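
// Typical construction, as a sketch. The Redis address and concurrency value
// are illustrative; RedisClientOpt is the connection option defined elsewhere
// in this package:
//
//     srv := asynq.NewServer(
//         asynq.RedisClientOpt{Addr: "localhost:6379"},
//         asynq.Config{Concurrency: 10},
//     )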

// A Handler processes tasks.
//
// ProcessTask should return nil if the processing of a task
// is successful.
//
// If ProcessTask returns a non-nil error or panics, the task
// will be retried after a delay.
type Handler interface {
	ProcessTask(context.Context, *Task) error
}

// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as a Handler. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
type HandlerFunc func(context.Context, *Task) error

// ProcessTask calls fn(ctx, task).
func (fn HandlerFunc) ProcessTask(ctx context.Context, task *Task) error {
	return fn(ctx, task)
}
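
// A minimal Handler written as an ordinary function, shown as a sketch
// (the log output is illustrative):
//
//     func handle(ctx context.Context, t *asynq.Task) error {
//         log.Printf("processing task of type %q", t.Type)
//         return nil
//     }
//
//     // ...later: srv.Run(asynq.HandlerFunc(handle))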

// ErrServerStopped indicates that the operation is now illegal because the server has already been stopped.
var ErrServerStopped = errors.New("asynq: the server has been stopped")

// Run starts the background-task processing and blocks until
// an OS signal to exit the program is received. Once it receives
// a signal, it gracefully shuts down all active workers and other
// goroutines to process the tasks.
//
// Run returns any error encountered during server startup.
// If the server has already been stopped, ErrServerStopped is returned.
func (srv *Server) Run(handler Handler) error {
	if err := srv.Start(handler); err != nil {
		return err
	}
	srv.waitForSignals()
	srv.Stop()
	return nil
}
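
// A sketch of typical use, assuming srv was built with NewServer and handle
// is a hypothetical HandlerFunc like the one shown above:
//
//     if err := srv.Run(asynq.HandlerFunc(handle)); err != nil {
//         log.Fatal(err)
//     }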

// Start starts the worker server. Once the server has started,
// it pulls tasks off queues and starts a worker goroutine for each task.
// Tasks are processed concurrently by the workers up to the concurrency
// level specified at initialization time.
//
// Start returns any error encountered during server startup.
// If the server has already been stopped, ErrServerStopped is returned.
func (srv *Server) Start(handler Handler) error {
	if handler == nil {
		return fmt.Errorf("asynq: server cannot run with nil handler")
	}
	switch srv.status.Get() {
	case base.StatusRunning:
		return fmt.Errorf("asynq: the server is already running")
	case base.StatusStopped:
		return ErrServerStopped
	}
	srv.status.Set(base.StatusRunning)
	srv.processor.handler = handler

	srv.logger.Info("Starting processing")

	srv.heartbeater.start(&srv.wg)
	srv.subscriber.start(&srv.wg)
	srv.syncer.start(&srv.wg)
	srv.recoverer.start(&srv.wg)
	srv.scheduler.start(&srv.wg)
	srv.processor.start(&srv.wg)
	return nil
}

// Stop stops the worker server.
// It gracefully closes all active workers. The server will wait for
// active workers to finish processing tasks for the duration specified in Config.ShutdownTimeout.
// If a worker does not finish processing a task within the timeout, the task will be pushed back to Redis.
func (srv *Server) Stop() {
	switch srv.status.Get() {
	case base.StatusIdle, base.StatusStopped:
		// server is not running, do nothing and return.
		return
	}

	srv.logger.Info("Starting graceful shutdown")
	// Note: The order of termination is important.
	// Sender goroutines should be terminated before the receiver goroutines.
	// processor -> syncer (via syncCh)
	// processor -> heartbeater (via starting, finished channels)
	srv.scheduler.terminate()
	srv.processor.terminate()
	srv.recoverer.terminate()
	srv.syncer.terminate()
	srv.subscriber.terminate()
	srv.heartbeater.terminate()

	srv.wg.Wait()

	srv.broker.Close()
	srv.status.Set(base.StatusStopped)

	srv.logger.Info("Exiting")
}

// Quiet signals the server to stop pulling new tasks off queues.
// Quiet should be used before stopping the server.
func (srv *Server) Quiet() {
	srv.logger.Info("Stopping processor")
	srv.processor.stop()
	srv.status.Set(base.StatusQuiet)
	srv.logger.Info("Processor stopped")
}
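
// A sketch of a custom shutdown sequence using Start, Quiet, and Stop directly
// instead of Run; the signal choice is illustrative and handle is a
// hypothetical HandlerFunc:
//
//     if err := srv.Start(asynq.HandlerFunc(handle)); err != nil {
//         log.Fatal(err)
//     }
//     sigs := make(chan os.Signal, 1)
//     signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT)
//     <-sigs      // wait for a termination signal
//     srv.Quiet() // stop pulling new tasks off queues
//     srv.Stop()  // wait for in-flight tasks up to ShutdownTimeout, then exit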