2019-11-15 13:07:19 +08:00
|
|
|
package asynq
|
|
|
|
|
2019-11-17 06:45:51 +08:00
|
|
|
/*
|
|
|
|
TODOs:
|
|
|
|
- [P0] Task error handling
|
|
|
|
- [P0] Retry
|
|
|
|
- [P0] Dead task (retry exhausted)
|
2019-11-18 07:36:33 +08:00
|
|
|
- [P0] Shutdown all workers gracefully when the process gets killed
|
2019-11-17 06:45:51 +08:00
|
|
|
- [P1] Add Support for multiple queues
|
2019-11-18 07:36:33 +08:00
|
|
|
- [P1] User defined max-retry count
|
|
|
|
- [P2] Web UI
|
2019-11-17 06:45:51 +08:00
|
|
|
*/
|
|
|
|
|
2019-11-15 13:07:19 +08:00
|
|
|
import (
|
|
|
|
"encoding/json"
|
2019-11-15 23:21:25 +08:00
|
|
|
"fmt"
|
|
|
|
"log"
|
|
|
|
"strconv"
|
2019-11-15 13:07:19 +08:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/go-redis/redis/v7"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Redis keys used by asynq.
const (
	queuePrefix = "asynq:queues:"  // LIST - prefix for per-queue task lists (e.g. "asynq:queues:default")
	allQueues   = "asynq:queues"   // SET  - names of all queues that have been used
	scheduled   = "asynq:scheduled" // ZSET - tasks to run in the future, scored by execute-at unix time
	retry       = "asynq:retry"    // ZSET - tasks awaiting retry, scored by next-retry unix time
	dead        = "asynq:dead"     // ZSET - intended for tasks whose retries are exhausted (see TODOs)
)

// defaultMaxRetry is the maximum number of times a task is retried by default.
const defaultMaxRetry = 25
|
|
|
|
|
2019-11-15 13:07:19 +08:00
|
|
|
// Client is used for scheduling tasks to be processed by workers.
// (Note: despite the original comment, this is a concrete struct, not an interface.)
type Client struct {
	rdb *redis.Client
}
|
|
|
|
|
|
|
|
// Task represents a task to be performed.
type Task struct {
	// Type indicates the kind of the task to be performed.
	Type string

	// Payload is arbitrary data needed for task execution.
	// The value has to be JSON-serializable (it is marshaled into redis).
	Payload map[string]interface{}
}
|
|
|
|
|
2019-11-18 05:25:01 +08:00
|
|
|
// taskMessage is an internal representation of a task with additional metadata fields.
// This data gets written to redis as JSON.
type taskMessage struct {
	// fields from type Task
	Type    string
	Payload map[string]interface{}

	//------- metadata fields ----------

	// Queue is the name of the queue this message should be enqueued to.
	Queue string

	// Retry is the remaining retry count.
	Retry int

	// Retried is the number of times we've retried so far.
	Retried int

	// ErrorMsg is the error message from the last failure.
	ErrorMsg string
}
|
|
|
|
|
|
|
|
// RedisOpt specifies redis connection options.
type RedisOpt struct {
	Addr     string // redis server address, passed to redis.Options.Addr
	Password string // password for AUTH; leave empty if the server requires none
}
|
|
|
|
|
|
|
|
// NewClient creates and returns a new client.
|
|
|
|
func NewClient(opt *RedisOpt) *Client {
|
|
|
|
rdb := redis.NewClient(&redis.Options{Addr: opt.Addr, Password: opt.Password})
|
|
|
|
return &Client{rdb: rdb}
|
|
|
|
}
|
|
|
|
|
2019-11-17 06:45:51 +08:00
|
|
|
// Process enqueues the task to be performed at a given time.
|
|
|
|
func (c *Client) Process(task *Task, executeAt time.Time) error {
|
2019-11-18 05:25:01 +08:00
|
|
|
msg := &taskMessage{
|
|
|
|
Type: task.Type,
|
|
|
|
Payload: task.Payload,
|
2019-11-18 07:36:33 +08:00
|
|
|
Queue: "default",
|
2019-11-18 05:25:01 +08:00
|
|
|
Retry: defaultMaxRetry,
|
2019-11-15 13:07:19 +08:00
|
|
|
}
|
2019-11-18 07:36:33 +08:00
|
|
|
return c.enqueue(msg, executeAt)
|
|
|
|
}
|
|
|
|
|
|
|
|
// enqueue pushes a given task to the specified queue.
|
|
|
|
func (c *Client) enqueue(msg *taskMessage, executeAt time.Time) error {
|
2019-11-18 05:25:01 +08:00
|
|
|
if time.Now().After(executeAt) {
|
|
|
|
return push(c.rdb, msg)
|
2019-11-15 13:07:19 +08:00
|
|
|
}
|
2019-11-18 07:36:33 +08:00
|
|
|
return zadd(c.rdb, scheduled, float64(executeAt.Unix()), msg)
|
2019-11-15 13:07:19 +08:00
|
|
|
}
|
2019-11-15 23:21:25 +08:00
|
|
|
|
|
|
|
//-------------------- Workers --------------------

// Workers represents a pool of workers that pull tasks off redis queues
// and process them concurrently.
type Workers struct {
	rdb *redis.Client

	// poolTokens is a counting semaphore to ensure the number of active workers
	// does not exceed the limit (the channel's buffer size).
	poolTokens chan struct{}
}
|
|
|
|
|
|
|
|
// NewWorkers creates and returns a new Workers.
|
|
|
|
func NewWorkers(poolSize int, opt *RedisOpt) *Workers {
|
|
|
|
rdb := redis.NewClient(&redis.Options{Addr: opt.Addr, Password: opt.Password})
|
|
|
|
return &Workers{
|
|
|
|
rdb: rdb,
|
|
|
|
poolTokens: make(chan struct{}, poolSize),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-17 06:45:51 +08:00
|
|
|
// TaskHandler handles a given task and reports any error encountered
// while processing it. A non-nil error triggers the retry logic.
type TaskHandler func(*Task) error
|
2019-11-15 23:21:25 +08:00
|
|
|
|
2019-11-17 06:45:51 +08:00
|
|
|
// Run starts the workers and scheduler with a given handler.
|
|
|
|
func (w *Workers) Run(handler TaskHandler) {
|
2019-11-18 07:36:33 +08:00
|
|
|
go w.pollDeferred()
|
2019-11-15 23:21:25 +08:00
|
|
|
|
|
|
|
for {
|
|
|
|
// pull message out of the queue and process it
|
2019-11-17 00:20:23 +08:00
|
|
|
// TODO(hibiken): sort the list of queues in order of priority
|
2019-11-18 07:36:33 +08:00
|
|
|
res, err := w.rdb.BLPop(5*time.Second, listQueues(w.rdb)...).Result() // NOTE: BLPOP needs to time out because if case a new queue is added.
|
2019-11-15 23:21:25 +08:00
|
|
|
if err != nil {
|
|
|
|
if err != redis.Nil {
|
2019-11-17 00:20:23 +08:00
|
|
|
log.Printf("BLPOP command failed: %v\n", err)
|
2019-11-15 23:21:25 +08:00
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-11-18 05:25:01 +08:00
|
|
|
q, data := res[0], res[1]
|
|
|
|
fmt.Printf("perform task %v from %s\n", data, q)
|
|
|
|
var msg taskMessage
|
|
|
|
err = json.Unmarshal([]byte(data), &msg)
|
2019-11-17 06:45:51 +08:00
|
|
|
if err != nil {
|
2019-11-18 05:25:01 +08:00
|
|
|
log.Printf("[Servere Error] could not parse json encoded message %s: %v", data, err)
|
2019-11-15 23:21:25 +08:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
w.poolTokens <- struct{}{} // acquire a token
|
2019-11-18 05:25:01 +08:00
|
|
|
t := &Task{Type: msg.Type, Payload: msg.Payload}
|
2019-11-17 06:45:51 +08:00
|
|
|
go func(task *Task) {
|
2019-11-18 05:25:01 +08:00
|
|
|
err := handler(task)
|
|
|
|
if err != nil {
|
2019-11-18 07:36:33 +08:00
|
|
|
if msg.Retry == 0 {
|
|
|
|
// TODO(hibiken): Add the task to "dead" collection
|
|
|
|
fmt.Println("Retry exausted!!!")
|
|
|
|
return
|
|
|
|
}
|
2019-11-18 05:25:01 +08:00
|
|
|
fmt.Println("RETRY!!!")
|
2019-11-18 07:36:33 +08:00
|
|
|
delay := 10 * time.Second // TODO(hibiken): Implement exponential backoff.
|
|
|
|
msg.Retry--
|
|
|
|
msg.ErrorMsg = err.Error()
|
|
|
|
if err := zadd(w.rdb, retry, float64(time.Now().Add(delay).Unix()), &msg); err != nil {
|
|
|
|
// TODO(hibiken): Not sure how to handle this error
|
|
|
|
log.Printf("[SEVERE ERROR] could not add msg %+v to 'retry' set: %v\n", msg, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-11-18 05:25:01 +08:00
|
|
|
}
|
|
|
|
<-w.poolTokens // release the token
|
|
|
|
}(t)
|
2019-11-15 23:21:25 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-18 07:36:33 +08:00
|
|
|
func (w *Workers) pollDeferred() {
|
|
|
|
zsets := []string{scheduled, retry}
|
2019-11-15 23:21:25 +08:00
|
|
|
for {
|
2019-11-18 07:36:33 +08:00
|
|
|
for _, zset := range zsets {
|
|
|
|
// Get next items in the queue with scores (time to execute) <= now.
|
|
|
|
now := time.Now().Unix()
|
|
|
|
fmt.Printf("[DEBUG] polling ZSET %q\n", zset)
|
|
|
|
jobs, err := w.rdb.ZRangeByScore(zset,
|
|
|
|
&redis.ZRangeBy{
|
|
|
|
Min: "-inf",
|
|
|
|
Max: strconv.Itoa(int(now))}).Result()
|
|
|
|
fmt.Printf("len(jobs) = %d\n", len(jobs))
|
2019-11-15 23:21:25 +08:00
|
|
|
if err != nil {
|
2019-11-18 07:36:33 +08:00
|
|
|
log.Printf("radis command ZRANGEBYSCORE failed: %v\n", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if len(jobs) == 0 {
|
|
|
|
fmt.Println("jobs empty")
|
2019-11-15 23:21:25 +08:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-11-18 07:36:33 +08:00
|
|
|
for _, j := range jobs {
|
|
|
|
var msg taskMessage
|
|
|
|
err = json.Unmarshal([]byte(j), &msg)
|
2019-11-16 23:39:42 +08:00
|
|
|
if err != nil {
|
2019-11-18 07:36:33 +08:00
|
|
|
fmt.Println("unmarshal failed")
|
2019-11-16 23:39:42 +08:00
|
|
|
continue
|
|
|
|
}
|
2019-11-18 07:36:33 +08:00
|
|
|
|
|
|
|
if w.rdb.ZRem(zset, j).Val() > 0 {
|
|
|
|
err = push(w.rdb, &msg)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("could not push task to queue %q: %v", msg.Queue, err)
|
|
|
|
// TODO(hibiken): Handle this error properly. Add back to scheduled ZSET?
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
2019-11-15 23:21:25 +08:00
|
|
|
}
|
|
|
|
}
|
2019-11-18 07:36:33 +08:00
|
|
|
time.Sleep(5 * time.Second)
|
2019-11-15 23:21:25 +08:00
|
|
|
}
|
|
|
|
}
|
2019-11-16 23:51:53 +08:00
|
|
|
|
2019-11-17 00:20:23 +08:00
|
|
|
// push pushes the task to the specified queue to get picked up by a worker.
|
2019-11-18 05:25:01 +08:00
|
|
|
func push(rdb *redis.Client, msg *taskMessage) error {
|
|
|
|
bytes, err := json.Marshal(msg)
|
2019-11-16 23:51:53 +08:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not encode task into JSON: %v", err)
|
|
|
|
}
|
2019-11-18 05:25:01 +08:00
|
|
|
qname := queuePrefix + msg.Queue
|
2019-11-17 00:20:23 +08:00
|
|
|
err = rdb.SAdd(allQueues, qname).Err()
|
2019-11-16 23:51:53 +08:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not execute command SADD %q %q: %v",
|
2019-11-17 00:20:23 +08:00
|
|
|
allQueues, qname, err)
|
2019-11-16 23:51:53 +08:00
|
|
|
}
|
2019-11-17 00:20:23 +08:00
|
|
|
return rdb.RPush(qname, string(bytes)).Err()
|
|
|
|
}
|
|
|
|
|
2019-11-18 07:36:33 +08:00
|
|
|
// zadd serializes the given message and adds to the specified sorted set.
|
|
|
|
func zadd(rdb *redis.Client, zset string, zscore float64, msg *taskMessage) error {
|
|
|
|
bytes, err := json.Marshal(msg)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not encode task into JSON: %v", err)
|
|
|
|
}
|
|
|
|
return rdb.ZAdd(zset, &redis.Z{Member: string(bytes), Score: zscore}).Err()
|
|
|
|
}
|
|
|
|
|
2019-11-17 00:20:23 +08:00
|
|
|
// listQueues returns the list of all queues.
// NOTE(review): the SMEMBERS error is discarded via Val(); on a redis
// failure this silently returns an empty slice — confirm callers tolerate that.
func listQueues(rdb *redis.Client) []string {
	return rdb.SMembers(allQueues).Val()
}
|