package asynq

/*
TODOs:
- [P0] Write tests
- [P0] Shut down all workers gracefully when the process gets killed
- [P1] Add support for multiple queues
- [P1] User-defined max retry count
- [P2] Web UI
*/

import (
	"encoding/json"
	"fmt"
	"log"
	"math"
	"math/rand"
	"strconv"
	"time"

	"github.com/go-redis/redis/v7"
)

// Redis keys
const (
	queuePrefix = "asynq:queues:"   // LIST
	allQueues   = "asynq:queues"    // SET
	scheduled   = "asynq:scheduled" // ZSET
	retry       = "asynq:retry"     // ZSET
	dead        = "asynq:dead"      // ZSET
)
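
// For example, a task enqueued to the "default" queue (the queue name used by
// Client.Process below) is pushed onto the list "asynq:queues:default", and the
// set "asynq:queues" records every such queue key so workers can discover them.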

// Max retry count by default
const defaultMaxRetry = 25

// Client is responsible for scheduling tasks.
type Client struct {
	rdb *redis.Client
}

// Task represents a task to be performed.
type Task struct {
	// Type indicates the kind of the task to be performed.
	Type string

	// Payload is arbitrary data needed for task execution.
	// The value has to be serializable.
	Payload map[string]interface{}
}

// taskMessage is an internal representation of a task with additional metadata fields.
// This data gets written to redis.
type taskMessage struct {
	// fields from type Task
	Type    string
	Payload map[string]interface{}

	//------- metadata fields ----------

	// queue name this message should be enqueued to
	Queue string

	// max number of retries for this task
	Retry int

	// number of times we've retried so far
	Retried int

	// error message from the last failure
	ErrorMsg string
}
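
// As a rough sketch, a serialized taskMessage stored in redis looks like the
// following (the task type and payload values here are hypothetical):
//
//	{"Type":"send_email","Payload":{"user_id":42},"Queue":"default","Retry":25,"Retried":0,"ErrorMsg":""}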

// RedisOpt specifies redis options.
type RedisOpt struct {
	Addr     string
	Password string
}

// NewClient creates and returns a new client.
func NewClient(opt *RedisOpt) *Client {
	rdb := redis.NewClient(&redis.Options{Addr: opt.Addr, Password: opt.Password})
	return &Client{rdb: rdb}
}

// Process enqueues the task to be performed at a given time.
func (c *Client) Process(task *Task, executeAt time.Time) error {
	msg := &taskMessage{
		Type:    task.Type,
		Payload: task.Payload,
		Queue:   "default",
		Retry:   defaultMaxRetry,
	}
	return c.enqueue(msg, executeAt)
}
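
// A minimal usage sketch for the client side (the redis address, task type, and
// payload are hypothetical; error handling is omitted):
//
//	client := NewClient(&RedisOpt{Addr: "localhost:6379"})
//	t := &Task{Type: "send_email", Payload: map[string]interface{}{"user_id": 42}}
//	client.Process(t, time.Now())                // picked up by a worker right away
//	client.Process(t, time.Now().Add(time.Hour)) // scheduled for roughly an hour from now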

// enqueue pushes the task to the queue if it is ready to be processed now;
// otherwise it adds the task to the scheduled set to be processed at executeAt.
func (c *Client) enqueue(msg *taskMessage, executeAt time.Time) error {
	if time.Now().After(executeAt) {
		return push(c.rdb, msg)
	}
	return zadd(c.rdb, scheduled, float64(executeAt.Unix()), msg)
}

//-------------------- Workers --------------------

// Workers represents a pool of workers.
type Workers struct {
	rdb *redis.Client

	// poolTokens is a counting semaphore to ensure the number of active workers
	// does not exceed the limit.
	poolTokens chan struct{}

	// running indicates whether the workers are currently running.
	running bool

	poller *poller
}

// NewWorkers creates and returns a new Workers.
func NewWorkers(poolSize int, opt *RedisOpt) *Workers {
	rdb := redis.NewClient(&redis.Options{Addr: opt.Addr, Password: opt.Password})
	poller := &poller{
		rdb:         rdb,
		done:        make(chan struct{}),
		avgInterval: 5 * time.Second,
		zsets:       []string{scheduled, retry},
	}
	return &Workers{
		rdb:        rdb,
		poller:     poller,
		poolTokens: make(chan struct{}, poolSize),
	}
}

// TaskHandler handles a given task and reports any error.
type TaskHandler func(*Task) error

// Run starts the workers and scheduler with a given handler.
func (w *Workers) Run(handler TaskHandler) {
	if w.running {
		return
	}
	w.running = true

	w.poller.start()

	for {
		// pull a message out of the queue and process it
		// TODO(hibiken): sort the list of queues in order of priority
		// NOTE: BLPOP needs to time out in case a new queue is added.
		res, err := w.rdb.BLPop(5*time.Second, listQueues(w.rdb)...).Result()
		if err != nil {
			if err != redis.Nil {
				log.Printf("BLPOP command failed: %v\n", err)
			}
			continue
		}

		q, data := res[0], res[1]
		fmt.Printf("perform task %v from %s\n", data, q)
		var msg taskMessage
		err = json.Unmarshal([]byte(data), &msg)
		if err != nil {
			log.Printf("[SEVERE ERROR] could not parse json encoded message %s: %v", data, err)
			continue
		}
		t := &Task{Type: msg.Type, Payload: msg.Payload}
		w.poolTokens <- struct{}{} // acquire token
		go func(task *Task) {
			defer func() { <-w.poolTokens }() // release token
			err := handler(task)
			if err != nil {
				if msg.Retried >= msg.Retry {
					fmt.Println("Retry exhausted!!!")
					if err := kill(w.rdb, &msg); err != nil {
						log.Printf("[SEVERE ERROR] could not add task %+v to 'dead' set: %v\n", msg, err)
					}
					return
				}
				fmt.Println("RETRY!!!")
				retryAt := time.Now().Add(delaySeconds(msg.Retried))
				fmt.Printf("[DEBUG] retrying the task in %v\n", retryAt.Sub(time.Now()))
				msg.Retried++
				msg.ErrorMsg = err.Error()
				if err := zadd(w.rdb, retry, float64(retryAt.Unix()), &msg); err != nil {
					// TODO(hibiken): Not sure how to handle this error
					log.Printf("[SEVERE ERROR] could not add msg %+v to 'retry' set: %v\n", msg, err)
					return
				}
			}
		}(t)
	}
}
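
// A minimal usage sketch for the worker side (the pool size, redis address, and
// handler body are hypothetical; Run blocks indefinitely):
//
//	workers := NewWorkers(10, &RedisOpt{Addr: "localhost:6379"})
//	workers.Run(func(t *Task) error {
//		fmt.Printf("processing %s: %v\n", t.Type, t.Payload)
//		return nil // a non-nil error triggers the retry logic above
//	})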

// push pushes the task to the specified queue to get picked up by a worker.
func push(rdb *redis.Client, msg *taskMessage) error {
	bytes, err := json.Marshal(msg)
	if err != nil {
		return fmt.Errorf("could not encode task into JSON: %v", err)
	}
	qname := queuePrefix + msg.Queue
	err = rdb.SAdd(allQueues, qname).Err()
	if err != nil {
		return fmt.Errorf("could not execute command SADD %q %q: %v",
			allQueues, qname, err)
	}
	return rdb.RPush(qname, string(bytes)).Err()
}

// zadd serializes the given message and adds it to the specified sorted set.
func zadd(rdb *redis.Client, zset string, zscore float64, msg *taskMessage) error {
	bytes, err := json.Marshal(msg)
	if err != nil {
		return fmt.Errorf("could not encode task into JSON: %v", err)
	}
	return rdb.ZAdd(zset, &redis.Z{Member: string(bytes), Score: zscore}).Err()
}

const maxDeadTask = 100
const deadExpirationInDays = 90

// kill sends the task to the "dead" sorted set. It also trims the sorted set by
// timestamp and set size.
func kill(rdb *redis.Client, msg *taskMessage) error {
	bytes, err := json.Marshal(msg)
	if err != nil {
		return fmt.Errorf("could not encode task into JSON: %v", err)
	}
	now := time.Now()
	pipe := rdb.Pipeline()
	pipe.ZAdd(dead, &redis.Z{Member: string(bytes), Score: float64(now.Unix())})
	limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
	pipe.ZRemRangeByScore(dead, "-inf", strconv.Itoa(int(limit)))
	pipe.ZRemRangeByRank(dead, 0, -maxDeadTask-1) // keep only the newest maxDeadTask entries
	_, err = pipe.Exec()
	return err
}

// listQueues returns the list of all queues.
func listQueues(rdb *redis.Client) []string {
	return rdb.SMembers(allQueues).Val()
}

// delaySeconds returns the duration to delay before retrying, based on the retry count.
// Formula taken from https://github.com/mperham/sidekiq.
func delaySeconds(count int) time.Duration {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	s := int(math.Pow(float64(count), 4)) + 15 + (r.Intn(30) * (count + 1))
	return time.Duration(s) * time.Second
}
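
// For a rough sense of the backoff this yields (count^4 + 15 + rand(0..29)*(count+1) seconds):
//
//	count=0  -> 15s to 44s
//	count=1  -> 16s to 74s
//	count=5  -> roughly 11 to 14 minutes
//	count=10 -> roughly 2.8 hours
//	count=24 -> roughly 3.8 days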