2020-01-03 10:13:16 +08:00
|
|
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
|
|
|
// Use of this source code is governed by a MIT license
|
|
|
|
// that can be found in the LICENSE file.
|
|
|
|
|
2019-12-05 12:30:37 +08:00
|
|
|
package rdb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/json"
|
2019-12-08 22:46:04 +08:00
|
|
|
"fmt"
|
2020-06-05 21:42:27 +08:00
|
|
|
"sort"
|
2019-12-23 01:09:57 +08:00
|
|
|
"strings"
|
2019-12-05 12:30:37 +08:00
|
|
|
"time"
|
|
|
|
|
2019-12-08 22:46:04 +08:00
|
|
|
"github.com/go-redis/redis/v7"
|
2020-07-02 21:21:20 +08:00
|
|
|
"github.com/google/uuid"
|
2019-12-22 23:15:45 +08:00
|
|
|
"github.com/hibiken/asynq/internal/base"
|
2019-12-26 13:29:20 +08:00
|
|
|
"github.com/spf13/cast"
|
2019-12-05 12:30:37 +08:00
|
|
|
)
|
|
|
|
|
|
|
|
// Stats represents a state of queues at a certain time.
type Stats struct {
	Enqueued   int // total number of tasks across all queues, ready to be processed
	InProgress int // number of tasks currently being processed
	Scheduled  int // number of tasks scheduled to run in the future
	Retry      int // number of tasks waiting to be retried
	Dead       int // number of tasks that have exhausted their retries
	Processed  int // number of tasks processed on the current day (per daily counter key)
	Failed     int // number of tasks failed on the current day (per daily counter key)
	Queues     []*Queue // per-queue breakdown, sorted by queue name
	Timestamp  time.Time // time at which this snapshot was taken
}
|
|
|
|
|
2020-06-05 21:42:27 +08:00
|
|
|
// Queue represents a task queue.
type Queue struct {
	// Name of the queue (e.g. "default", "critical").
	// Note: It doesn't include the prefix "asynq:queues:".
	Name string

	// Paused indicates whether the queue is paused.
	// If true, tasks in the queue should not be processed.
	Paused bool

	// Size is the number of tasks in the queue.
	Size int
}
|
|
|
|
|
2020-01-05 01:41:05 +08:00
|
|
|
// DailyStats holds aggregate data for a given day.
type DailyStats struct {
	Processed int       // number of tasks processed on the day
	Failed    int       // number of tasks that failed on the day
	Time      time.Time // the day the counts are for
}
|
|
|
|
|
2019-12-05 12:30:37 +08:00
|
|
|
// EnqueuedTask is a task in a queue and is ready to be processed.
type EnqueuedTask struct {
	ID      uuid.UUID              // unique identifier of the task
	Type    string                 // type name of the task
	Payload map[string]interface{} // payload data of the task
	Queue   string                 // name of the queue the task is in
}
|
|
|
|
|
|
|
|
// InProgressTask is a task that's currently being processed.
type InProgressTask struct {
	ID      uuid.UUID              // unique identifier of the task
	Type    string                 // type name of the task
	Payload map[string]interface{} // payload data of the task
}
|
|
|
|
|
|
|
|
// ScheduledTask is a task that's scheduled to be processed in the future.
type ScheduledTask struct {
	ID        uuid.UUID              // unique identifier of the task
	Type      string                 // type name of the task
	Payload   map[string]interface{} // payload data of the task
	ProcessAt time.Time              // time the task is scheduled to be processed
	Score     int64                  // zset score; ProcessAt as a unix timestamp
	Queue     string                 // name of the queue the task will go to
}
|
|
|
|
|
|
|
|
// RetryTask is a task that's in retry queue because worker failed to process the task.
type RetryTask struct {
	ID      uuid.UUID              // unique identifier of the task
	Type    string                 // type name of the task
	Payload map[string]interface{} // payload data of the task
	// TODO(hibiken): add LastFailedAt time.Time
	ProcessAt time.Time // next time the task will be retried
	ErrorMsg  string    // error message from the last failed attempt
	Retried   int       // number of times the task has been retried so far
	Retry     int       // retry limit as recorded in the task message
	Score     int64     // zset score; ProcessAt as a unix timestamp
	Queue     string    // name of the queue the task belongs to
}
|
|
|
|
|
|
|
|
// DeadTask is a task that has exhausted all retries.
type DeadTask struct {
	ID           uuid.UUID              // unique identifier of the task
	Type         string                 // type name of the task
	Payload      map[string]interface{} // payload data of the task
	LastFailedAt time.Time              // time of the final failure
	ErrorMsg     string                 // error message from the final failed attempt
	Score        int64                  // zset score; LastFailedAt as a unix timestamp
	Queue        string                 // name of the queue the task belonged to
}
|
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// KEYS[1] -> asynq:queues
// KEYS[2] -> asynq:in_progress
// KEYS[3] -> asynq:scheduled
// KEYS[4] -> asynq:retry
// KEYS[5] -> asynq:dead
// KEYS[6] -> asynq:processed:<yyyy-mm-dd>
// KEYS[7] -> asynq:failure:<yyyy-mm-dd>
//
// Returns a flat array of alternating name/count pairs: one
// (queue-key, LLEN) pair per registered queue, then pairs for the
// in_progress list and the scheduled/retry/dead zsets, and finally the
// daily "processed"/"failed" counters (0 when the counter key is absent).
var currentStatsCmd = redis.NewScript(`
local res = {}
local queues = redis.call("SMEMBERS", KEYS[1])
for _, qkey in ipairs(queues) do
table.insert(res, qkey)
table.insert(res, redis.call("LLEN", qkey))
end
table.insert(res, KEYS[2])
table.insert(res, redis.call("LLEN", KEYS[2]))
table.insert(res, KEYS[3])
table.insert(res, redis.call("ZCARD", KEYS[3]))
table.insert(res, KEYS[4])
table.insert(res, redis.call("ZCARD", KEYS[4]))
table.insert(res, KEYS[5])
table.insert(res, redis.call("ZCARD", KEYS[5]))
local pcount = 0
local p = redis.call("GET", KEYS[6])
if p then
pcount = tonumber(p)
end
table.insert(res, "processed")
table.insert(res, pcount)
local fcount = 0
local f = redis.call("GET", KEYS[7])
if f then
fcount = tonumber(f)
end
table.insert(res, "failed")
table.insert(res, fcount)
return res`)
|
|
|
|
|
2019-12-05 12:30:37 +08:00
|
|
|
// CurrentStats returns a current state of the queues.
func (r *RDB) CurrentStats() (*Stats, error) {
	now := time.Now()
	res, err := currentStatsCmd.Run(r.client, []string{
		base.AllQueues,
		base.InProgressQueue,
		base.ScheduledQueue,
		base.RetryQueue,
		base.DeadQueue,
		base.ProcessedKey(now),
		base.FailureKey(now),
	}).Result()
	if err != nil {
		return nil, err
	}
	data, err := cast.ToSliceE(res)
	if err != nil {
		return nil, err
	}
	// Set of queue keys that are currently paused.
	paused, err := r.client.SMembersMap(base.PausedQueues).Result()
	if err != nil {
		return nil, err
	}
	stats := &Stats{
		Queues:    make([]*Queue, 0),
		Timestamp: now,
	}
	// The script replies with a flat list of alternating name/count pairs;
	// walk it two entries at a time and dispatch on the name.
	for i := 0; i < len(data); i += 2 {
		key := cast.ToString(data[i])
		val := cast.ToInt(data[i+1])

		switch {
		case strings.HasPrefix(key, base.QueuePrefix):
			// A per-queue entry: accumulate the total and record the queue.
			stats.Enqueued += val
			q := Queue{
				Name: strings.TrimPrefix(key, base.QueuePrefix),
				Size: val,
			}
			if _, exist := paused[key]; exist {
				q.Paused = true
			}
			stats.Queues = append(stats.Queues, &q)
		case key == base.InProgressQueue:
			stats.InProgress = val
		case key == base.ScheduledQueue:
			stats.Scheduled = val
		case key == base.RetryQueue:
			stats.Retry = val
		case key == base.DeadQueue:
			stats.Dead = val
		case key == "processed":
			stats.Processed = val
		case key == "failed":
			stats.Failed = val
		}
	}
	// Sort queues by name for deterministic output.
	sort.Slice(stats.Queues, func(i, j int) bool {
		return stats.Queues[i].Name < stats.Queues[j].Name
	})
	return stats, nil
}
|
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// KEYS -> list of daily counter keys (asynq:processed:<date>,
// asynq:failure:<date>, ...).
// Returns the integer value of each key in order, substituting 0 for
// keys that do not exist.
var historicalStatsCmd = redis.NewScript(`
local res = {}
for _, key in ipairs(KEYS) do
local n = redis.call("GET", key)
if not n then
n = 0
end
table.insert(res, tonumber(n))
end
return res`)
|
|
|
|
|
2020-01-05 01:41:05 +08:00
|
|
|
// HistoricalStats returns a list of stats from the last n days.
// The returned slice is ordered most recent day first.
func (r *RDB) HistoricalStats(n int) ([]*DailyStats, error) {
	if n < 1 {
		return []*DailyStats{}, nil
	}
	const day = 24 * time.Hour
	now := time.Now().UTC()
	var days []time.Time
	var keys []string
	// Build the processed/failure counter keys for today and the
	// preceding n-1 days, most recent first.
	for i := 0; i < n; i++ {
		ts := now.Add(-time.Duration(i) * day)
		days = append(days, ts)
		keys = append(keys, base.ProcessedKey(ts))
		keys = append(keys, base.FailureKey(ts))
	}
	res, err := historicalStatsCmd.Run(r.client, keys, len(keys)).Result()
	if err != nil {
		return nil, err
	}
	data, err := cast.ToIntSliceE(res)
	if err != nil {
		return nil, err
	}
	// The script replies with counts in the same order as keys, so the
	// values alternate processed/failed per day.
	var stats []*DailyStats
	for i := 0; i < len(data); i += 2 {
		stats = append(stats, &DailyStats{
			Processed: data[i],
			Failed:    data[i+1],
			Time:      days[i/2],
		})
	}
	return stats, nil
}
|
|
|
|
|
2019-12-23 01:09:57 +08:00
|
|
|
// RedisInfo returns a map of redis info.
|
|
|
|
func (r *RDB) RedisInfo() (map[string]string, error) {
|
|
|
|
res, err := r.client.Info().Result()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
info := make(map[string]string)
|
|
|
|
lines := strings.Split(res, "\r\n")
|
|
|
|
for _, l := range lines {
|
|
|
|
kv := strings.Split(l, ":")
|
|
|
|
if len(kv) == 2 {
|
|
|
|
info[kv[0]] = kv[1]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return info, nil
|
|
|
|
}
|
|
|
|
|
2020-01-24 23:19:58 +08:00
|
|
|
// reverse reverses the order of the elements of x in place.
func reverse(x []string) {
	i, j := 0, len(x)-1
	for i < j {
		x[i], x[j] = x[j], x[i]
		i, j = i+1, j-1
	}
}
|
|
|
|
|
2020-01-24 23:19:58 +08:00
|
|
|
// Pagination specifies the page size and page number
// for the list operation.
type Pagination struct {
	// Number of items in the page.
	Size int

	// Page number starting from zero.
	Page int
}

// start returns the zero-based index of the first item on the page.
func (p Pagination) start() int64 {
	return int64(p.Size) * int64(p.Page)
}

// stop returns the zero-based index of the last item on the page.
func (p Pagination) stop() int64 {
	return p.start() + int64(p.Size) - 1
}
|
|
|
|
|
|
|
|
// ListEnqueued returns enqueued tasks that are ready to be processed.
|
|
|
|
func (r *RDB) ListEnqueued(qname string, pgn Pagination) ([]*EnqueuedTask, error) {
|
|
|
|
qkey := base.QueueKey(qname)
|
|
|
|
if !r.client.SIsMember(base.AllQueues, qkey).Val() {
|
|
|
|
return nil, fmt.Errorf("queue %q does not exist", qname)
|
2020-01-11 13:32:15 +08:00
|
|
|
}
|
2020-01-24 23:19:58 +08:00
|
|
|
// Note: Because we use LPUSH to redis list, we need to calculate the
|
|
|
|
// correct range and reverse the list to get the tasks with pagination.
|
|
|
|
stop := -pgn.start() - 1
|
|
|
|
start := -pgn.stop() - 1
|
|
|
|
data, err := r.client.LRange(qkey, start, stop).Result()
|
2020-01-11 13:32:15 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-01-24 23:19:58 +08:00
|
|
|
reverse(data)
|
2019-12-05 12:30:37 +08:00
|
|
|
var tasks []*EnqueuedTask
|
|
|
|
for _, s := range data {
|
2019-12-22 23:15:45 +08:00
|
|
|
var msg base.TaskMessage
|
2019-12-05 12:30:37 +08:00
|
|
|
err := json.Unmarshal([]byte(s), &msg)
|
|
|
|
if err != nil {
|
2020-01-10 22:56:51 +08:00
|
|
|
continue // bad data, ignore and continue
|
2019-12-05 12:30:37 +08:00
|
|
|
}
|
|
|
|
tasks = append(tasks, &EnqueuedTask{
|
|
|
|
ID: msg.ID,
|
|
|
|
Type: msg.Type,
|
|
|
|
Payload: msg.Payload,
|
2020-01-10 23:06:26 +08:00
|
|
|
Queue: msg.Queue,
|
2019-12-05 12:30:37 +08:00
|
|
|
})
|
|
|
|
}
|
|
|
|
return tasks, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ListInProgress returns all tasks that are currently being processed.
// Entries that cannot be decoded are silently skipped.
func (r *RDB) ListInProgress(pgn Pagination) ([]*InProgressTask, error) {
	// Note: Because we use LPUSH to redis list, we need to calculate the
	// correct range and reverse the list to get the tasks with pagination.
	stop := -pgn.start() - 1
	start := -pgn.stop() - 1
	data, err := r.client.LRange(base.InProgressQueue, start, stop).Result()
	if err != nil {
		return nil, err
	}
	reverse(data)
	var tasks []*InProgressTask
	for _, s := range data {
		var msg base.TaskMessage
		err := json.Unmarshal([]byte(s), &msg)
		if err != nil {
			continue // bad data, ignore and continue
		}
		tasks = append(tasks, &InProgressTask{
			ID:      msg.ID,
			Type:    msg.Type,
			Payload: msg.Payload,
		})
	}
	return tasks, nil
}
|
|
|
|
|
|
|
|
// ListScheduled returns all tasks that are scheduled to be processed
// in the future. Entries that cannot be decoded are silently skipped.
func (r *RDB) ListScheduled(pgn Pagination) ([]*ScheduledTask, error) {
	data, err := r.client.ZRangeWithScores(base.ScheduledQueue, pgn.start(), pgn.stop()).Result()
	if err != nil {
		return nil, err
	}
	var tasks []*ScheduledTask
	for _, z := range data {
		s, ok := z.Member.(string)
		if !ok {
			continue // bad data, ignore and continue
		}
		var msg base.TaskMessage
		err := json.Unmarshal([]byte(s), &msg)
		if err != nil {
			continue // bad data, ignore and continue
		}
		// The zset score is the unix time the task should be processed.
		processAt := time.Unix(int64(z.Score), 0)
		tasks = append(tasks, &ScheduledTask{
			ID:        msg.ID,
			Type:      msg.Type,
			Payload:   msg.Payload,
			Queue:     msg.Queue,
			ProcessAt: processAt,
			Score:     int64(z.Score),
		})
	}
	return tasks, nil
}
|
|
|
|
|
|
|
|
// ListRetry returns all tasks that have failed before and will be retried
// in the future. Entries that cannot be decoded are silently skipped.
func (r *RDB) ListRetry(pgn Pagination) ([]*RetryTask, error) {
	data, err := r.client.ZRangeWithScores(base.RetryQueue, pgn.start(), pgn.stop()).Result()
	if err != nil {
		return nil, err
	}
	var tasks []*RetryTask
	for _, z := range data {
		s, ok := z.Member.(string)
		if !ok {
			continue // bad data, ignore and continue
		}
		var msg base.TaskMessage
		err := json.Unmarshal([]byte(s), &msg)
		if err != nil {
			continue // bad data, ignore and continue
		}
		// The zset score is the unix time of the next retry.
		processAt := time.Unix(int64(z.Score), 0)
		tasks = append(tasks, &RetryTask{
			ID:        msg.ID,
			Type:      msg.Type,
			Payload:   msg.Payload,
			ErrorMsg:  msg.ErrorMsg,
			Retry:     msg.Retry,
			Retried:   msg.Retried,
			Queue:     msg.Queue,
			ProcessAt: processAt,
			Score:     int64(z.Score),
		})
	}
	return tasks, nil
}
|
|
|
|
|
|
|
|
// ListDead returns all tasks that have exhausted their retry limit.
// Entries that cannot be decoded are silently skipped.
func (r *RDB) ListDead(pgn Pagination) ([]*DeadTask, error) {
	data, err := r.client.ZRangeWithScores(base.DeadQueue, pgn.start(), pgn.stop()).Result()
	if err != nil {
		return nil, err
	}
	var tasks []*DeadTask
	for _, z := range data {
		s, ok := z.Member.(string)
		if !ok {
			continue // bad data, ignore and continue
		}
		var msg base.TaskMessage
		err := json.Unmarshal([]byte(s), &msg)
		if err != nil {
			continue // bad data, ignore and continue
		}
		// The zset score is the unix time of the final failure.
		lastFailedAt := time.Unix(int64(z.Score), 0)
		tasks = append(tasks, &DeadTask{
			ID:           msg.ID,
			Type:         msg.Type,
			Payload:      msg.Payload,
			ErrorMsg:     msg.ErrorMsg,
			Queue:        msg.Queue,
			LastFailedAt: lastFailedAt,
			Score:        int64(z.Score),
		})
	}
	return tasks, nil
}
|
2019-12-08 22:46:04 +08:00
|
|
|
|
2019-12-10 11:33:07 +08:00
|
|
|
// EnqueueDeadTask finds a task that matches the given id and score from dead queue
|
2019-12-09 06:17:57 +08:00
|
|
|
// and enqueues it for processing. If a task that matches the id and score
|
|
|
|
// does not exist, it returns ErrTaskNotFound.
|
2020-07-02 21:21:20 +08:00
|
|
|
func (r *RDB) EnqueueDeadTask(id uuid.UUID, score int64) error {
|
2019-12-22 23:15:45 +08:00
|
|
|
n, err := r.removeAndEnqueue(base.DeadQueue, id.String(), float64(score))
|
2019-12-08 22:46:04 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if n == 0 {
|
|
|
|
return ErrTaskNotFound
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-10 11:33:07 +08:00
|
|
|
// EnqueueRetryTask finds a task that matches the given id and score from retry queue
|
2019-12-09 06:17:57 +08:00
|
|
|
// and enqueues it for processing. If a task that matches the id and score
|
|
|
|
// does not exist, it returns ErrTaskNotFound.
|
2020-07-02 21:21:20 +08:00
|
|
|
func (r *RDB) EnqueueRetryTask(id uuid.UUID, score int64) error {
|
2019-12-22 23:15:45 +08:00
|
|
|
n, err := r.removeAndEnqueue(base.RetryQueue, id.String(), float64(score))
|
2019-12-08 22:46:04 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if n == 0 {
|
|
|
|
return ErrTaskNotFound
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-10 11:33:07 +08:00
|
|
|
// EnqueueScheduledTask finds a task that matches the given id and score from scheduled queue
|
2019-12-09 06:17:57 +08:00
|
|
|
// and enqueues it for processing. If a task that matches the id and score does not
|
2019-12-08 22:46:04 +08:00
|
|
|
// exist, it returns ErrTaskNotFound.
|
2020-07-02 21:21:20 +08:00
|
|
|
func (r *RDB) EnqueueScheduledTask(id uuid.UUID, score int64) error {
|
2019-12-22 23:15:45 +08:00
|
|
|
n, err := r.removeAndEnqueue(base.ScheduledQueue, id.String(), float64(score))
|
2019-12-08 22:46:04 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if n == 0 {
|
|
|
|
return ErrTaskNotFound
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-11 13:48:19 +08:00
|
|
|
// EnqueueAllScheduledTasks enqueues all tasks from scheduled queue
// and returns the number of tasks enqueued.
func (r *RDB) EnqueueAllScheduledTasks() (int64, error) {
	return r.removeAndEnqueueAll(base.ScheduledQueue)
}
|
|
|
|
|
2019-12-11 13:48:19 +08:00
|
|
|
// EnqueueAllRetryTasks enqueues all tasks from retry queue
// and returns the number of tasks enqueued.
func (r *RDB) EnqueueAllRetryTasks() (int64, error) {
	return r.removeAndEnqueueAll(base.RetryQueue)
}
|
|
|
|
|
2019-12-11 13:38:25 +08:00
|
|
|
// EnqueueAllDeadTasks enqueues all tasks from dead queue
// and returns the number of tasks enqueued.
func (r *RDB) EnqueueAllDeadTasks() (int64, error) {
	return r.removeAndEnqueueAll(base.DeadQueue)
}
|
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// KEYS[1] -> ZSET to move the task from (e.g., asynq:dead)
// ARGV[1] -> score of the task to move
// ARGV[2] -> id of the task to move
// ARGV[3] -> queue key prefix (e.g., "asynq:queues:")
//
// Finds the member with the given score whose decoded ID matches, pushes
// it onto its original queue (prefix .. msg.Queue), and removes it from
// the ZSET. Returns 1 if a task was moved, 0 if no match was found.
var removeAndEnqueueCmd = redis.NewScript(`
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], ARGV[1], ARGV[1])
for _, msg in ipairs(msgs) do
local decoded = cjson.decode(msg)
if decoded["ID"] == ARGV[2] then
local qkey = ARGV[3] .. decoded["Queue"]
redis.call("LPUSH", qkey, msg)
redis.call("ZREM", KEYS[1], msg)
return 1
end
end
return 0`)
|
|
|
|
|
|
|
|
// removeAndEnqueue moves the task with the given id and score from the
// given ZSET back onto its original queue. It returns the number of
// tasks moved (0 or 1).
func (r *RDB) removeAndEnqueue(zset, id string, score float64) (int64, error) {
	res, err := removeAndEnqueueCmd.Run(r.client, []string{zset}, score, id, base.QueuePrefix).Result()
	if err != nil {
		return 0, err
	}
	n, ok := res.(int64)
	if !ok {
		return 0, fmt.Errorf("could not cast %v to int64", res)
	}
	return n, nil
}
|
2019-12-11 12:28:31 +08:00
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// KEYS[1] -> ZSET to drain (e.g., asynq:retry)
// ARGV[1] -> queue key prefix (e.g., "asynq:queues:")
//
// Moves every member of the ZSET onto its original queue
// (prefix .. msg.Queue) and returns the number of tasks moved.
var removeAndEnqueueAllCmd = redis.NewScript(`
local msgs = redis.call("ZRANGE", KEYS[1], 0, -1)
for _, msg in ipairs(msgs) do
local decoded = cjson.decode(msg)
local qkey = ARGV[1] .. decoded["Queue"]
redis.call("LPUSH", qkey, msg)
redis.call("ZREM", KEYS[1], msg)
end
return table.getn(msgs)`)
|
|
|
|
|
2019-12-11 13:38:25 +08:00
|
|
|
// removeAndEnqueueAll moves every task in the given ZSET back onto its
// original queue and returns the number of tasks moved.
func (r *RDB) removeAndEnqueueAll(zset string) (int64, error) {
	res, err := removeAndEnqueueAllCmd.Run(r.client, []string{zset}, base.QueuePrefix).Result()
	if err != nil {
		return 0, err
	}
	n, ok := res.(int64)
	if !ok {
		return 0, fmt.Errorf("could not cast %v to int64", res)
	}
	return n, nil
}
|
|
|
|
|
|
|
|
// KillRetryTask finds a task that matches the given id and score from retry queue
|
|
|
|
// and moves it to dead queue. If a task that maches the id and score does not exist,
|
|
|
|
// it returns ErrTaskNotFound.
|
2020-07-02 21:21:20 +08:00
|
|
|
func (r *RDB) KillRetryTask(id uuid.UUID, score int64) error {
|
2019-12-26 23:17:26 +08:00
|
|
|
n, err := r.removeAndKill(base.RetryQueue, id.String(), float64(score))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if n == 0 {
|
|
|
|
return ErrTaskNotFound
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// KillScheduledTask finds a task that matches the given id and score from scheduled queue
|
|
|
|
// and moves it to dead queue. If a task that maches the id and score does not exist,
|
|
|
|
// it returns ErrTaskNotFound.
|
2020-07-02 21:21:20 +08:00
|
|
|
func (r *RDB) KillScheduledTask(id uuid.UUID, score int64) error {
|
2019-12-26 23:17:26 +08:00
|
|
|
n, err := r.removeAndKill(base.ScheduledQueue, id.String(), float64(score))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if n == 0 {
|
|
|
|
return ErrTaskNotFound
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-27 22:22:33 +08:00
|
|
|
// KillAllRetryTasks moves all tasks from retry queue to dead queue and
// returns the number of tasks that were moved.
func (r *RDB) KillAllRetryTasks() (int64, error) {
	return r.removeAndKillAll(base.RetryQueue)
}
|
|
|
|
|
|
|
|
// KillAllScheduledTasks moves all tasks from scheduled queue to dead queue and
// returns the number of tasks that were moved.
func (r *RDB) KillAllScheduledTasks() (int64, error) {
	return r.removeAndKillAll(base.ScheduledQueue)
}
|
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// KEYS[1] -> ZSET to move task from (e.g., retry queue)
// KEYS[2] -> asynq:dead
// ARGV[1] -> score of the task to kill
// ARGV[2] -> id of the task to kill
// ARGV[3] -> current timestamp
// ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
// ARGV[5] -> max number of tasks in dead queue (e.g., 100)
//
// Moves the matching task into the dead zset (scored by the current
// time), then trims the dead zset by age (cutoff) and by size (max).
// Returns 1 if a task was moved, 0 if no match was found.
var removeAndKillCmd = redis.NewScript(`
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], ARGV[1], ARGV[1])
for _, msg in ipairs(msgs) do
local decoded = cjson.decode(msg)
if decoded["ID"] == ARGV[2] then
redis.call("ZREM", KEYS[1], msg)
redis.call("ZADD", KEYS[2], ARGV[3], msg)
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[4])
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[5])
return 1
end
end
return 0`)
|
|
|
|
|
|
|
|
// removeAndKill moves the task with the given id and score from the
// given ZSET into the dead queue, trimming the dead queue by age and
// size. It returns the number of tasks moved (0 or 1).
func (r *RDB) removeAndKill(zset, id string, score float64) (int64, error) {
	now := time.Now()
	limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
	res, err := removeAndKillCmd.Run(r.client,
		[]string{zset, base.DeadQueue},
		score, id, now.Unix(), limit, maxDeadTasks).Result()
	if err != nil {
		return 0, err
	}
	n, ok := res.(int64)
	if !ok {
		return 0, fmt.Errorf("could not cast %v to int64", res)
	}
	return n, nil
}
|
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// KEYS[1] -> ZSET to move task from (e.g., retry queue)
// KEYS[2] -> asynq:dead
// ARGV[1] -> current timestamp
// ARGV[2] -> cutoff timestamp (e.g., 90 days ago)
// ARGV[3] -> max number of tasks in dead queue (e.g., 100)
//
// Moves every member of the source zset into the dead zset (scored by
// the current time), trimming the dead zset by age and size as it goes.
// Returns the number of tasks moved.
var removeAndKillAllCmd = redis.NewScript(`
local msgs = redis.call("ZRANGE", KEYS[1], 0, -1)
for _, msg in ipairs(msgs) do
redis.call("ZADD", KEYS[2], ARGV[1], msg)
redis.call("ZREM", KEYS[1], msg)
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[2])
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[3])
end
return table.getn(msgs)`)
|
|
|
|
|
2019-12-27 22:22:33 +08:00
|
|
|
// removeAndKillAll moves every task in the given ZSET into the dead
// queue (trimming it by age and size) and returns the number of tasks
// moved.
func (r *RDB) removeAndKillAll(zset string) (int64, error) {
	now := time.Now()
	limit := now.AddDate(0, 0, -deadExpirationInDays).Unix() // 90 days ago
	res, err := removeAndKillAllCmd.Run(r.client, []string{zset, base.DeadQueue},
		now.Unix(), limit, maxDeadTasks).Result()
	if err != nil {
		return 0, err
	}
	n, ok := res.(int64)
	if !ok {
		return 0, fmt.Errorf("could not cast %v to int64", res)
	}
	return n, nil
}
|
2019-12-12 11:56:19 +08:00
|
|
|
|
|
|
|
// DeleteDeadTask finds a task that matches the given id and score from dead queue
// and deletes it. If a task that matches the id and score does not exist,
// it returns ErrTaskNotFound.
func (r *RDB) DeleteDeadTask(id uuid.UUID, score int64) error {
	return r.deleteTask(base.DeadQueue, id.String(), float64(score))
}
|
|
|
|
|
|
|
|
// DeleteRetryTask finds a task that matches the given id and score from retry queue
// and deletes it. If a task that matches the id and score does not exist,
// it returns ErrTaskNotFound.
func (r *RDB) DeleteRetryTask(id uuid.UUID, score int64) error {
	return r.deleteTask(base.RetryQueue, id.String(), float64(score))
}
|
|
|
|
|
|
|
|
// DeleteScheduledTask finds a task that matches the given id and score from
// scheduled queue and deletes it. If a task that matches the id and score
// does not exist, it returns ErrTaskNotFound.
func (r *RDB) DeleteScheduledTask(id uuid.UUID, score int64) error {
	return r.deleteTask(base.ScheduledQueue, id.String(), float64(score))
}
|
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// KEYS[1] -> ZSET to delete the task from
// ARGV[1] -> score of the task to delete
// ARGV[2] -> id of the task to delete
//
// Removes the member with the given score whose decoded ID matches.
// Returns 1 if a task was deleted, 0 if no match was found.
var deleteTaskCmd = redis.NewScript(`
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], ARGV[1], ARGV[1])
for _, msg in ipairs(msgs) do
local decoded = cjson.decode(msg)
if decoded["ID"] == ARGV[2] then
redis.call("ZREM", KEYS[1], msg)
return 1
end
end
return 0`)
|
|
|
|
|
|
|
|
// deleteTask removes the task with the given id and score from the
// given ZSET. It returns ErrTaskNotFound if no matching task exists.
func (r *RDB) deleteTask(zset, id string, score float64) error {
	res, err := deleteTaskCmd.Run(r.client, []string{zset}, score, id).Result()
	if err != nil {
		return err
	}
	n, ok := res.(int64)
	if !ok {
		return fmt.Errorf("could not cast %v to int64", res)
	}
	if n == 0 {
		return ErrTaskNotFound
	}
	return nil
}
|
2019-12-12 22:38:01 +08:00
|
|
|
|
|
|
|
// DeleteAllDeadTasks deletes all tasks from the dead queue.
func (r *RDB) DeleteAllDeadTasks() error {
	return r.client.Del(base.DeadQueue).Err()
}
|
|
|
|
|
|
|
|
// DeleteAllRetryTasks deletes all tasks from the retry queue.
func (r *RDB) DeleteAllRetryTasks() error {
	return r.client.Del(base.RetryQueue).Err()
}
|
|
|
|
|
|
|
|
// DeleteAllScheduledTasks deletes all tasks from the scheduled queue.
func (r *RDB) DeleteAllScheduledTasks() error {
	return r.client.Del(base.ScheduledQueue).Err()
}
|
2020-01-13 22:50:03 +08:00
|
|
|
|
2020-01-13 23:03:07 +08:00
|
|
|
// ErrQueueNotFound indicates specified queue does not exist.
type ErrQueueNotFound struct {
	qname string // queue name (without the "asynq:queues:" prefix)
}

// Error implements the error interface.
func (e *ErrQueueNotFound) Error() string {
	return fmt.Sprintf("queue %q does not exist", e.qname)
}
|
|
|
|
|
|
|
|
// ErrQueueNotEmpty indicates specified queue is not empty.
type ErrQueueNotEmpty struct {
	qname string // queue name (without the "asynq:queues:" prefix)
}

// Error implements the error interface.
func (e *ErrQueueNotEmpty) Error() string {
	return fmt.Sprintf("queue %q is not empty", e.qname)
}
|
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// Skip checking whether queue is empty before removing.
// KEYS[1] -> asynq:queues (set of all queue keys)
// KEYS[2] -> queue key to remove
// Replies with the error "LIST NOT FOUND" if the queue is not registered.
var removeQueueForceCmd = redis.NewScript(`
local n = redis.call("SREM", KEYS[1], KEYS[2])
if n == 0 then
return redis.error_reply("LIST NOT FOUND")
end
redis.call("DEL", KEYS[2])
return redis.status_reply("OK")`)
|
|
|
|
|
|
|
|
// Checks whether queue is empty before removing.
// KEYS[1] -> asynq:queues (set of all queue keys)
// KEYS[2] -> queue key to remove
// Replies with "LIST NOT EMPTY" if the queue still has tasks, or
// "LIST NOT FOUND" if the queue is not registered.
var removeQueueCmd = redis.NewScript(`
local l = redis.call("LLEN", KEYS[2]) if l > 0 then
return redis.error_reply("LIST NOT EMPTY")
end
local n = redis.call("SREM", KEYS[1], KEYS[2])
if n == 0 then
return redis.error_reply("LIST NOT FOUND")
end
redis.call("DEL", KEYS[2])
return redis.status_reply("OK")`)
|
|
|
|
|
2020-01-13 23:03:07 +08:00
|
|
|
// RemoveQueue removes the specified queue.
//
// If force is set to true, it will remove the queue regardless
// of whether the queue is empty.
// If force is set to false, it will only remove the queue if
// it is empty.
//
// It returns *ErrQueueNotFound if the queue is not registered, and
// *ErrQueueNotEmpty if force is false and the queue still has tasks.
func (r *RDB) RemoveQueue(qname string, force bool) error {
	var script *redis.Script
	if force {
		script = removeQueueForceCmd
	} else {
		script = removeQueueCmd
	}
	err := script.Run(r.client,
		[]string{base.AllQueues, base.QueueKey(qname)},
		force).Err()
	if err != nil {
		// Translate the sentinel error replies from the Lua scripts
		// into this package's typed errors.
		switch err.Error() {
		case "LIST NOT FOUND":
			return &ErrQueueNotFound{qname}
		case "LIST NOT EMPTY":
			return &ErrQueueNotEmpty{qname}
		default:
			return err
		}
	}
	return nil
}
|
2020-02-02 14:22:48 +08:00
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// KEYS[1] -> asynq:servers (sorted set of server-info keys; see ListServers)
// ARGV[1] -> current unix time
//
// Collects the values stored at every member of KEYS[1] whose score is >= now.
// Note: Script also removes stale keys (members scored strictly below now).
// NOTE(review): scores appear to be expiration timestamps — confirm with the writer side.
var listServersCmd = redis.NewScript(`
local res = {}
local now = tonumber(ARGV[1])
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
for _, key in ipairs(keys) do
local s = redis.call("GET", key)
if s then
table.insert(res, s)
end
end
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
return res`)
|
|
|
|
|
2020-04-13 23:14:55 +08:00
|
|
|
// ListServers returns the list of server info.
|
2020-04-13 08:09:58 +08:00
|
|
|
func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
|
|
|
|
res, err := listServersCmd.Run(r.client,
|
2020-04-13 07:42:11 +08:00
|
|
|
[]string{base.AllServers}, time.Now().UTC().Unix()).Result()
|
2020-02-02 14:22:48 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
data, err := cast.ToStringSliceE(res)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-04-13 08:09:58 +08:00
|
|
|
var servers []*base.ServerInfo
|
2020-02-02 14:22:48 +08:00
|
|
|
for _, s := range data {
|
2020-04-13 08:09:58 +08:00
|
|
|
var info base.ServerInfo
|
|
|
|
err := json.Unmarshal([]byte(s), &info)
|
2020-02-02 14:22:48 +08:00
|
|
|
if err != nil {
|
|
|
|
continue // skip bad data
|
|
|
|
}
|
2020-04-13 08:09:58 +08:00
|
|
|
servers = append(servers, &info)
|
2020-02-02 14:22:48 +08:00
|
|
|
}
|
2020-04-13 08:09:58 +08:00
|
|
|
return servers, nil
|
2020-02-02 14:22:48 +08:00
|
|
|
}
|
2020-02-23 12:42:53 +08:00
|
|
|
|
|
|
|
// KEYS[1] -> asynq:workers (sorted set of worker-hash keys; see ListWorkers)
// ARGV[1] -> current unix time
//
// Collects all hash values from every member of KEYS[1] whose score is >= now.
// Note: Script also removes stale keys (members scored strictly below now).
// NOTE(review): scores appear to be expiration timestamps — confirm with the writer side.
var listWorkersCmd = redis.NewScript(`
local res = {}
local now = tonumber(ARGV[1])
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
for _, key in ipairs(keys) do
local workers = redis.call("HVALS", key)
for _, w in ipairs(workers) do
table.insert(res, w)
end
end
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
return res`)
|
|
|
|
|
|
|
|
// ListWorkers returns the list of worker stats.
|
|
|
|
func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
|
|
|
res, err := listWorkersCmd.Run(r.client, []string{base.AllWorkers}, time.Now().UTC().Unix()).Result()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
data, err := cast.ToStringSliceE(res)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
var workers []*base.WorkerInfo
|
|
|
|
for _, s := range data {
|
|
|
|
var w base.WorkerInfo
|
|
|
|
err := json.Unmarshal([]byte(s), &w)
|
|
|
|
if err != nil {
|
|
|
|
continue // skip bad data
|
|
|
|
}
|
|
|
|
workers = append(workers, &w)
|
|
|
|
}
|
|
|
|
return workers, nil
|
|
|
|
}
|
2020-06-03 21:44:12 +08:00
|
|
|
|
|
|
|
// KEYS[1] -> asynq:paused
// ARGV[1] -> asynq:queues:<qname> - queue to pause
//
// Errors if the queue is already in the paused set; otherwise adds it
// (returns SADD's reply, i.e. the number of members added).
var pauseCmd = redis.NewScript(`
local ismem = redis.call("SISMEMBER", KEYS[1], ARGV[1])
if ismem == 1 then
return redis.error_reply("queue is already paused")
end
return redis.call("SADD", KEYS[1], ARGV[1])`)
|
|
|
|
|
|
|
|
// Pause pauses processing of tasks from the given queue.
|
|
|
|
func (r *RDB) Pause(qname string) error {
|
|
|
|
qkey := base.QueueKey(qname)
|
|
|
|
return pauseCmd.Run(r.client, []string{base.PausedQueues}, qkey).Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
// KEYS[1] -> asynq:paused
// ARGV[1] -> asynq:queues:<qname> - queue to unpause
//
// Errors if the queue is not in the paused set; otherwise removes it
// (returns SREM's reply, i.e. the number of members removed).
var unpauseCmd = redis.NewScript(`
local ismem = redis.call("SISMEMBER", KEYS[1], ARGV[1])
if ismem == 0 then
return redis.error_reply("queue is not paused")
end
return redis.call("SREM", KEYS[1], ARGV[1])`)
|
|
|
|
|
|
|
|
// Unpause resumes processing of tasks from the given queue.
|
|
|
|
func (r *RDB) Unpause(qname string) error {
|
|
|
|
qkey := base.QueueKey(qname)
|
|
|
|
return unpauseCmd.Run(r.client, []string{base.PausedQueues}, qkey).Err()
|
|
|
|
}
|