// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package rdb

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/errors"
	"github.com/spf13/cast"
)

// AllQueues returns a list of all queue names.
func (r *RDB) AllQueues() ([]string, error) {
	return r.client.SMembers(context.Background(), base.AllQueues).Result()
}

// Stats represents a state of a queue at a certain time.
type Stats struct {
	// Name of the queue (e.g. "default", "critical").
	Queue string
	// MemoryUsage is the total number of bytes the queue and its tasks require
	// to be stored in redis. It is an approximate memory usage value in bytes
	// since the value is computed by sampling.
	MemoryUsage int64
	// Paused indicates whether the queue is paused.
	// If true, tasks in the queue should not be processed.
	Paused bool
	// Size is the total number of tasks in the queue.
	Size int
	// Number of tasks in each state.
	Pending   int
	Active    int
	Scheduled int
	Retry     int
	Archived  int
	Completed int
	// Total number of tasks processed during the current date.
	// The number includes both succeeded and failed tasks.
	Processed int
	// Total number of tasks failed during the current date.
	Failed int
	// Time this stats was taken.
	Timestamp time.Time
}

// DailyStats holds aggregate data for a given day.
type DailyStats struct {
	// Name of the queue (e.g. "default", "critical").
	Queue string
	// Total number of tasks processed during the given day.
	// The number includes both succeeded and failed tasks.
	Processed int
	// Total number of tasks failed during the given day.
	Failed int
	// Date this stats was taken.
	Time time.Time
}

// KEYS[1] -> asynq:<qname>:pending
// KEYS[2] -> asynq:<qname>:active
// KEYS[3] -> asynq:<qname>:scheduled
// KEYS[4] -> asynq:<qname>:retry
// KEYS[5] -> asynq:<qname>:archived
// KEYS[6] -> asynq:<qname>:completed
// KEYS[7] -> asynq:<qname>:processed:<yyyy-mm-dd>
// KEYS[8] -> asynq:<qname>:failed:<yyyy-mm-dd>
// KEYS[9] -> asynq:<qname>:paused
var currentStatsCmd = redis.NewScript(`
local res = {}
table.insert(res, KEYS[1])
table.insert(res, redis.call("LLEN", KEYS[1]))
table.insert(res, KEYS[2])
table.insert(res, redis.call("LLEN", KEYS[2]))
table.insert(res, KEYS[3])
table.insert(res, redis.call("ZCARD", KEYS[3]))
table.insert(res, KEYS[4])
table.insert(res, redis.call("ZCARD", KEYS[4]))
table.insert(res, KEYS[5])
table.insert(res, redis.call("ZCARD", KEYS[5]))
table.insert(res, KEYS[6])
table.insert(res, redis.call("ZCARD", KEYS[6]))
local pcount = 0
local p = redis.call("GET", KEYS[7])
if p then
	pcount = tonumber(p)
end
table.insert(res, KEYS[7])
table.insert(res, pcount)
local fcount = 0
local f = redis.call("GET", KEYS[8])
if f then
	fcount = tonumber(f)
end
table.insert(res, KEYS[8])
table.insert(res, fcount)
table.insert(res, KEYS[9])
table.insert(res, redis.call("EXISTS", KEYS[9]))
return res`)
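
// The script above returns a flat array of alternating key/value entries:
// {KEYS[1], <count1>, KEYS[2], <count2>, ..., KEYS[9], <paused flag>}.
// CurrentStats below walks this array two entries at a time and matches each
// key against the per-state keys to populate the corresponding Stats fields.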

// CurrentStats returns a current state of the given queue.
func (r *RDB) CurrentStats(qname string) (*Stats, error) {
	var op errors.Op = "rdb.CurrentStats"
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, err)
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	now := time.Now()
	res, err := currentStatsCmd.Run(context.Background(), r.client, []string{
		base.PendingKey(qname),
		base.ActiveKey(qname),
		base.ScheduledKey(qname),
		base.RetryKey(qname),
		base.ArchivedKey(qname),
		base.CompletedKey(qname),
		base.ProcessedKey(qname, now),
		base.FailedKey(qname, now),
		base.PausedKey(qname),
	}).Result()
	if err != nil {
		return nil, errors.E(op, errors.Unknown, err)
	}
	data, err := cast.ToSliceE(res)
	if err != nil {
		return nil, errors.E(op, errors.Internal, "cast error: unexpected return value from Lua script")
	}
	stats := &Stats{
		Queue:     qname,
		Timestamp: now,
	}
	size := 0
	for i := 0; i < len(data); i += 2 {
		key := cast.ToString(data[i])
		val := cast.ToInt(data[i+1])
		switch key {
		case base.PendingKey(qname):
			stats.Pending = val
			size += val
		case base.ActiveKey(qname):
			stats.Active = val
			size += val
		case base.ScheduledKey(qname):
			stats.Scheduled = val
			size += val
		case base.RetryKey(qname):
			stats.Retry = val
			size += val
		case base.ArchivedKey(qname):
			stats.Archived = val
			size += val
		case base.CompletedKey(qname):
			stats.Completed = val
			size += val
		case base.ProcessedKey(qname, now):
			stats.Processed = val
		case base.FailedKey(qname, now):
			stats.Failed = val
		case base.PausedKey(qname):
			if val == 0 {
				stats.Paused = false
			} else {
				stats.Paused = true
			}
		}
	}
	stats.Size = size
	memusg, err := r.memoryUsage(qname)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	stats.MemoryUsage = memusg
	return stats, nil
}
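
// Illustrative usage (a sketch, not part of this file): a caller holding an
// *RDB value r could fetch queue stats like this.
//
//	stats, err := r.CurrentStats("default")
//	if err != nil {
//		return err // NotFound if the queue doesn't exist
//	}
//	fmt.Printf("size=%d pending=%d paused=%t\n", stats.Size, stats.Pending, stats.Paused)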

// Computes memory usage for the given queue by sampling tasks
// from each redis list/zset. Returns approximate memory usage value
// in bytes.
//
// KEYS[1] -> asynq:{qname}:active
// KEYS[2] -> asynq:{qname}:pending
// KEYS[3] -> asynq:{qname}:scheduled
// KEYS[4] -> asynq:{qname}:retry
// KEYS[5] -> asynq:{qname}:archived
// KEYS[6] -> asynq:{qname}:completed
//
// ARGV[1] -> asynq:{qname}:t:
// ARGV[2] -> sample_size (e.g. 20)
var memoryUsageCmd = redis.NewScript(`
local sample_size = tonumber(ARGV[2])
if sample_size <= 0 then
	return redis.error_reply("sample size must be a positive number")
end
local memusg = 0
for i=1,2 do
	local ids = redis.call("LRANGE", KEYS[i], 0, sample_size - 1)
	local sample_total = 0
	if (table.getn(ids) > 0) then
		for _, id in ipairs(ids) do
			local bytes = redis.call("MEMORY", "USAGE", ARGV[1] .. id)
			sample_total = sample_total + bytes
		end
		local n = redis.call("LLEN", KEYS[i])
		local avg = sample_total / table.getn(ids)
		memusg = memusg + (avg * n)
	end
	local m = redis.call("MEMORY", "USAGE", KEYS[i])
	if (m) then
		memusg = memusg + m
	end
end
for i=3,6 do
	local ids = redis.call("ZRANGE", KEYS[i], 0, sample_size - 1)
	local sample_total = 0
	if (table.getn(ids) > 0) then
		for _, id in ipairs(ids) do
			local bytes = redis.call("MEMORY", "USAGE", ARGV[1] .. id)
			sample_total = sample_total + bytes
		end
		local n = redis.call("ZCARD", KEYS[i])
		local avg = sample_total / table.getn(ids)
		memusg = memusg + (avg * n)
	end
	local m = redis.call("MEMORY", "USAGE", KEYS[i])
	if (m) then
		memusg = memusg + m
	end
end
return memusg
`)
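
// The estimate above is avg(sampled task hash sizes) * number of tasks, summed
// over the two lists (active, pending) and four zsets (scheduled, retry,
// archived, completed), plus the memory used by each list/zset structure
// itself. For example, if the 20 sampled task hashes of the pending list
// average 500 bytes and the list holds 10,000 task IDs, the pending
// contribution is roughly 500 * 10,000 = 5,000,000 bytes plus the list
// overhead reported by MEMORY USAGE.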

func (r *RDB) memoryUsage(qname string) (int64, error) {
	var op errors.Op = "rdb.memoryUsage"
	const sampleSize = 20
	keys := []string{
		base.ActiveKey(qname),
		base.PendingKey(qname),
		base.ScheduledKey(qname),
		base.RetryKey(qname),
		base.ArchivedKey(qname),
		base.CompletedKey(qname),
	}
	argv := []interface{}{
		base.TaskKeyPrefix(qname),
		sampleSize,
	}
	res, err := memoryUsageCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return 0, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
	}
	usg, err := cast.ToInt64E(res)
	if err != nil {
		return 0, errors.E(op, errors.Internal, "could not cast script return value to int64")
	}
	return usg, nil
}
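
// historicalStatsCmd (below) takes a list of daily processed/failed counter
// keys and returns one integer per key, substituting 0 for keys that don't
// exist (e.g. days with no activity).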

var historicalStatsCmd = redis.NewScript(`
local res = {}
for _, key in ipairs(KEYS) do
	local n = redis.call("GET", key)
	if not n then
		n = 0
	end
	table.insert(res, tonumber(n))
end
return res`)

// HistoricalStats returns a list of stats from the last n days for the given queue.
func (r *RDB) HistoricalStats(qname string, n int) ([]*DailyStats, error) {
	var op errors.Op = "rdb.HistoricalStats"
	if n < 1 {
		return nil, errors.E(op, errors.FailedPrecondition, "the number of days must be positive")
	}
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	const day = 24 * time.Hour
	now := time.Now().UTC()
	var days []time.Time
	var keys []string
	for i := 0; i < n; i++ {
		ts := now.Add(-time.Duration(i) * day)
		days = append(days, ts)
		keys = append(keys, base.ProcessedKey(qname, ts))
		keys = append(keys, base.FailedKey(qname, ts))
	}
	res, err := historicalStatsCmd.Run(context.Background(), r.client, keys).Result()
	if err != nil {
		return nil, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
	}
	data, err := cast.ToIntSliceE(res)
	if err != nil {
		return nil, errors.E(op, errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
	}
	var stats []*DailyStats
	for i := 0; i < len(data); i += 2 {
		stats = append(stats, &DailyStats{
			Queue:     qname,
			Processed: data[i],
			Failed:    data[i+1],
			Time:      days[i/2],
		})
	}
	return stats, nil
}
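
// Illustrative usage (a sketch, not part of this file): fetching a week of
// daily counters for the "default" queue.
//
//	days, err := r.HistoricalStats("default", 7)
//	if err != nil {
//		return err
//	}
//	for _, d := range days {
//		fmt.Printf("%s processed=%d failed=%d\n", d.Time.Format("2006-01-02"), d.Processed, d.Failed)
//	}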

// RedisInfo returns a map of redis info.
func (r *RDB) RedisInfo() (map[string]string, error) {
	res, err := r.client.Info(context.Background()).Result()
	if err != nil {
		return nil, err
	}
	return parseInfo(res)
}

// RedisClusterInfo returns a map of redis cluster info.
func (r *RDB) RedisClusterInfo() (map[string]string, error) {
	res, err := r.client.ClusterInfo(context.Background()).Result()
	if err != nil {
		return nil, err
	}
	return parseInfo(res)
}

func parseInfo(infoStr string) (map[string]string, error) {
	info := make(map[string]string)
	lines := strings.Split(infoStr, "\r\n")
	for _, l := range lines {
		kv := strings.Split(l, ":")
		if len(kv) == 2 {
			info[kv[0]] = kv[1]
		}
	}
	return info, nil
}
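
// parseInfo splits the raw INFO/CLUSTER INFO reply on CRLF and keeps only the
// "key:value" lines. For example, a reply line such as "redis_version:6.2.6"
// (value shown here for illustration) becomes info["redis_version"] = "6.2.6",
// while section headers like "# Server" and blank lines are skipped because
// they don't split into exactly two fields.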

// reverse reverses the order of the given task slice in place.
// TODO: Use generics once available.
func reverse(x []*base.TaskInfo) {
	for i := len(x)/2 - 1; i >= 0; i-- {
		opp := len(x) - 1 - i
		x[i], x[opp] = x[opp], x[i]
	}
}

// checkQueueExists verifies whether the queue exists.
// It returns QueueNotFoundError if the queue doesn't exist.
func (r *RDB) checkQueueExists(qname string) error {
	exists, err := r.queueExists(qname)
	if err != nil {
		return errors.E(errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return errors.E(errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	return nil
}

// Input:
// KEYS[1] -> task key (asynq:{<qname>}:t:<taskid>)
// ARGV[1] -> task id
// ARGV[2] -> current time in Unix time (seconds)
// ARGV[3] -> queue key prefix (asynq:{<qname>}:)
//
// Output:
// Tuple of {msg, state, nextProcessAt, result}
// msg: encoded task message
// state: string describing the state of the task
// nextProcessAt: unix time in seconds, zero if not applicable.
// result: result data associated with the task
//
// If the task key doesn't exist, it returns an error with a message "NOT FOUND".
var getTaskInfoCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[1]) == 0 then
	return redis.error_reply("NOT FOUND")
end
local msg, state, result = unpack(redis.call("HMGET", KEYS[1], "msg", "state", "result"))
if state == "scheduled" or state == "retry" then
	return {msg, state, redis.call("ZSCORE", ARGV[3] .. state, ARGV[1]), result}
end
if state == "pending" then
	return {msg, state, ARGV[2], result}
end
return {msg, state, 0, result}
`)

// GetTaskInfo returns a TaskInfo describing the task from the given queue.
func (r *RDB) GetTaskInfo(qname, id string) (*base.TaskInfo, error) {
	var op errors.Op = "rdb.GetTaskInfo"
	if err := r.checkQueueExists(qname); err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	keys := []string{base.TaskKey(qname, id)}
	argv := []interface{}{
		id,
		time.Now().Unix(),
		base.QueueKeyPrefix(qname),
	}
	res, err := getTaskInfoCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		if err.Error() == "NOT FOUND" {
			return nil, errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id})
		}
		return nil, errors.E(op, errors.Unknown, err)
	}
	vals, err := cast.ToSliceE(res)
	if err != nil {
		return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
	}
	if len(vals) != 4 {
		return nil, errors.E(op, errors.Internal, "unexpected number of values returned from Lua script")
	}
	encoded, err := cast.ToStringE(vals[0])
	if err != nil {
		return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
	}
	stateStr, err := cast.ToStringE(vals[1])
	if err != nil {
		return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
	}
	processAtUnix, err := cast.ToInt64E(vals[2])
	if err != nil {
		return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
	}
	resultStr, err := cast.ToStringE(vals[3])
	if err != nil {
		return nil, errors.E(op, errors.Internal, "unexpected value returned from Lua script")
	}
	msg, err := base.DecodeMessage([]byte(encoded))
	if err != nil {
		return nil, errors.E(op, errors.Internal, "could not decode task message")
	}
	state, err := base.TaskStateFromString(stateStr)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	var nextProcessAt time.Time
	if processAtUnix != 0 {
		nextProcessAt = time.Unix(processAtUnix, 0)
	}
	var result []byte
	if len(resultStr) > 0 {
		result = []byte(resultStr)
	}
	return &base.TaskInfo{
		Message:       msg,
		State:         state,
		NextProcessAt: nextProcessAt,
		Result:        result,
	}, nil
}
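
// Illustrative usage (a sketch, not part of this file): looking up a single
// task given its queue name and a task ID string id.
//
//	info, err := r.GetTaskInfo("default", id)
//	if err != nil {
//		return err // NotFound if either the queue or the task doesn't exist
//	}
//	fmt.Println(info.State, info.NextProcessAt)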

// Pagination specifies the page size and page number
// for the list operation.
type Pagination struct {
	// Number of items in the page.
	Size int

	// Page number starting from zero.
	Page int
}

func (p Pagination) start() int64 {
	return int64(p.Size * p.Page)
}

func (p Pagination) stop() int64 {
	return int64(p.Size*p.Page + p.Size - 1)
}
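
// start and stop are the zero-based inclusive offsets handed to LRANGE/ZRANGE.
// For example, with Size=20 and Page=2, start()=40 and stop()=59, i.e. the
// third page of 20 items. For list-backed states, listMessages below converts
// these to negative (tail-relative) offsets because tasks are LPUSHed, then
// reverses the decoded page to restore the intended order.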

// ListPending returns pending tasks that are ready to be processed.
func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListPending"
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	res, err := r.listMessages(qname, base.TaskStatePending, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	return res, nil
}

// ListActive returns all tasks that are currently being processed for the given queue.
func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListActive"
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	res, err := r.listMessages(qname, base.TaskStateActive, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	return res, nil
}

// KEYS[1] -> key for id list (e.g. asynq:{<qname>}:pending)
// ARGV[1] -> start offset
// ARGV[2] -> stop offset
// ARGV[3] -> task key prefix
var listMessagesCmd = redis.NewScript(`
local ids = redis.call("LRANGE", KEYS[1], ARGV[1], ARGV[2])
local data = {}
for _, id in ipairs(ids) do
	local key = ARGV[3] .. id
	local msg, result = unpack(redis.call("HMGET", key, "msg", "result"))
	table.insert(data, msg)
	table.insert(data, result)
end
return data
`)
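
// The reply of the script above is a flat array alternating encoded message
// and result payload: {msg1, result1, msg2, result2, ...}. listMessages below
// consumes it two entries at a time.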

// listMessages returns a list of TaskInfo for tasks in the given state,
// read from the Redis list that holds the task IDs for that state.
func (r *RDB) listMessages(qname string, state base.TaskState, pgn Pagination) ([]*base.TaskInfo, error) {
	var key string
	switch state {
	case base.TaskStateActive:
		key = base.ActiveKey(qname)
	case base.TaskStatePending:
		key = base.PendingKey(qname)
	default:
		panic(fmt.Sprintf("unsupported task state: %v", state))
	}
	// Note: Because we use LPUSH to redis list, we need to calculate the
	// correct range and reverse the list to get the tasks with pagination.
	stop := -pgn.start() - 1
	start := -pgn.stop() - 1
	res, err := listMessagesCmd.Run(context.Background(), r.client,
		[]string{key}, start, stop, base.TaskKeyPrefix(qname)).Result()
	if err != nil {
		return nil, errors.E(errors.Unknown, err)
	}
	data, err := cast.ToStringSliceE(res)
	if err != nil {
		return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
	}
	var infos []*base.TaskInfo
	for i := 0; i < len(data); i += 2 {
		m, err := base.DecodeMessage([]byte(data[i]))
		if err != nil {
			continue // bad data, ignore and continue
		}
		var res []byte
		if len(data[i+1]) > 0 {
			res = []byte(data[i+1])
		}
		var nextProcessAt time.Time
		if state == base.TaskStatePending {
			nextProcessAt = time.Now()
		}
		infos = append(infos, &base.TaskInfo{
			Message:       m,
			State:         state,
			NextProcessAt: nextProcessAt,
			Result:        res,
		})
	}
	reverse(infos)
	return infos, nil
}

// ListScheduled returns all tasks from the given queue that are scheduled
// to be processed in the future.
func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListScheduled"
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	res, err := r.listZSetEntries(qname, base.TaskStateScheduled, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	return res, nil
}

// ListRetry returns all tasks from the given queue that have failed before
// and will be retried in the future.
func (r *RDB) ListRetry(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListRetry"
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	res, err := r.listZSetEntries(qname, base.TaskStateRetry, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	return res, nil
}

// ListArchived returns all tasks from the given queue that have exhausted their retry limit.
func (r *RDB) ListArchived(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListArchived"
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	zs, err := r.listZSetEntries(qname, base.TaskStateArchived, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	return zs, nil
}

// ListCompleted returns all tasks from the given queue that have completed successfully.
func (r *RDB) ListCompleted(qname string, pgn Pagination) ([]*base.TaskInfo, error) {
	var op errors.Op = "rdb.ListCompleted"
	exists, err := r.queueExists(qname)
	if err != nil {
		return nil, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "sismember", Err: err})
	}
	if !exists {
		return nil, errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	}
	zs, err := r.listZSetEntries(qname, base.TaskStateCompleted, pgn)
	if err != nil {
		return nil, errors.E(op, errors.CanonicalCode(err), err)
	}
	return zs, nil
}

// queueExists reports whether a queue with the given name exists.
func (r *RDB) queueExists(qname string) (bool, error) {
	return r.client.SIsMember(context.Background(), base.AllQueues, qname).Result()
}

// KEYS[1] -> key for ids set (e.g. asynq:{<qname>}:scheduled)
// ARGV[1] -> min
// ARGV[2] -> max
// ARGV[3] -> task key prefix
//
// Returns an array populated with
// [msg1, score1, result1, msg2, score2, result2, ..., msgN, scoreN, resultN]
var listZSetEntriesCmd = redis.NewScript(`
local data = {}
local id_score_pairs = redis.call("ZRANGE", KEYS[1], ARGV[1], ARGV[2], "WITHSCORES")
for i = 1, table.getn(id_score_pairs), 2 do
	local id = id_score_pairs[i]
	local score = id_score_pairs[i+1]
	local key = ARGV[3] .. id
	local msg, res = unpack(redis.call("HMGET", key, "msg", "result"))
	table.insert(data, msg)
	table.insert(data, score)
	table.insert(data, res)
end
return data
`)

// listZSetEntries returns a list of TaskInfo for tasks in the given state,
// read from the Redis sorted set that holds the task IDs and scores for that state.
func (r *RDB) listZSetEntries(qname string, state base.TaskState, pgn Pagination) ([]*base.TaskInfo, error) {
	var key string
	switch state {
	case base.TaskStateScheduled:
		key = base.ScheduledKey(qname)
	case base.TaskStateRetry:
		key = base.RetryKey(qname)
	case base.TaskStateArchived:
		key = base.ArchivedKey(qname)
	case base.TaskStateCompleted:
		key = base.CompletedKey(qname)
	default:
		panic(fmt.Sprintf("unsupported task state: %v", state))
	}
	res, err := listZSetEntriesCmd.Run(context.Background(), r.client, []string{key},
		pgn.start(), pgn.stop(), base.TaskKeyPrefix(qname)).Result()
	if err != nil {
		return nil, errors.E(errors.Unknown, err)
	}
	data, err := cast.ToSliceE(res)
	if err != nil {
		return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
	}
	var infos []*base.TaskInfo
	for i := 0; i < len(data); i += 3 {
		s, err := cast.ToStringE(data[i])
		if err != nil {
			return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
		}
		score, err := cast.ToInt64E(data[i+1])
		if err != nil {
			return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
		}
		resStr, err := cast.ToStringE(data[i+2])
		if err != nil {
			return nil, errors.E(errors.Internal, fmt.Errorf("cast error: Lua script returned unexpected value: %v", res))
		}
		msg, err := base.DecodeMessage([]byte(s))
		if err != nil {
			continue // bad data, ignore and continue
		}
		var nextProcessAt time.Time
		if state == base.TaskStateScheduled || state == base.TaskStateRetry {
			nextProcessAt = time.Unix(score, 0)
		}
		var resBytes []byte
		if len(resStr) > 0 {
			resBytes = []byte(resStr)
		}
		infos = append(infos, &base.TaskInfo{
			Message:       msg,
			State:         state,
			NextProcessAt: nextProcessAt,
			Result:        resBytes,
		})
	}
	return infos, nil
}

// RunAllScheduledTasks enqueues all scheduled tasks from the given queue
// and returns the number of tasks enqueued.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) RunAllScheduledTasks(qname string) (int64, error) {
	var op errors.Op = "rdb.RunAllScheduledTasks"
	n, err := r.runAll(base.ScheduledKey(qname), qname)
	if errors.IsQueueNotFound(err) {
		return 0, errors.E(op, errors.NotFound, err)
	}
	if err != nil {
		return 0, errors.E(op, errors.Unknown, err)
	}
	return n, nil
}

// RunAllRetryTasks enqueues all retry tasks from the given queue
// and returns the number of tasks enqueued.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) RunAllRetryTasks(qname string) (int64, error) {
	var op errors.Op = "rdb.RunAllRetryTasks"
	n, err := r.runAll(base.RetryKey(qname), qname)
	if errors.IsQueueNotFound(err) {
		return 0, errors.E(op, errors.NotFound, err)
	}
	if err != nil {
		return 0, errors.E(op, errors.Unknown, err)
	}
	return n, nil
}

// RunAllArchivedTasks enqueues all archived tasks from the given queue
// and returns the number of tasks enqueued.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) RunAllArchivedTasks(qname string) (int64, error) {
	var op errors.Op = "rdb.RunAllArchivedTasks"
	n, err := r.runAll(base.ArchivedKey(qname), qname)
	if errors.IsQueueNotFound(err) {
		return 0, errors.E(op, errors.NotFound, err)
	}
	if err != nil {
		return 0, errors.E(op, errors.Unknown, err)
	}
	return n, nil
}

// runTaskCmd is a Lua script that updates the given task to pending state.
//
// Input:
// KEYS[1] -> asynq:{<qname>}:t:<task_id>
// KEYS[2] -> asynq:{<qname>}:pending
// --
// ARGV[1] -> task ID
// ARGV[2] -> queue key prefix; asynq:{<qname>}:
//
// Output:
// Numeric code indicating the status:
// Returns 1 if task is successfully updated.
// Returns 0 if task is not found.
// Returns -1 if task is in active state.
// Returns -2 if task is in pending state.
// Returns error reply if unexpected error occurs.
var runTaskCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[1]) == 0 then
	return 0
end
local state = redis.call("HGET", KEYS[1], "state")
if state == "active" then
	return -1
elseif state == "pending" then
	return -2
end
local n = redis.call("ZREM", ARGV[2] .. state, ARGV[1])
if n == 0 then
	return redis.error_reply("internal error: task id not found in zset " .. tostring(state))
end
redis.call("LPUSH", KEYS[2], ARGV[1])
redis.call("HSET", KEYS[1], "state", "pending")
return 1
`)

// RunTask finds a task that matches the id from the given queue and updates it to pending state.
// It returns nil if it successfully updated the task.
//
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
// If a task with the given id doesn't exist in the queue, it returns TaskNotFoundError.
// If a task is in active or pending state it returns a non-nil error with Code FailedPrecondition.
func (r *RDB) RunTask(qname, id string) error {
	var op errors.Op = "rdb.RunTask"
	if err := r.checkQueueExists(qname); err != nil {
		return errors.E(op, errors.CanonicalCode(err), err)
	}
	keys := []string{
		base.TaskKey(qname, id),
		base.PendingKey(qname),
	}
	argv := []interface{}{
		id,
		base.QueueKeyPrefix(qname),
	}
	res, err := runTaskCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return errors.E(op, errors.Unknown, err)
	}
	n, ok := res.(int64)
	if !ok {
		return errors.E(op, errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
	}
	switch n {
	case 1:
		return nil
	case 0:
		return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id})
	case -1:
		return errors.E(op, errors.FailedPrecondition, "task is already running")
	case -2:
		return errors.E(op, errors.FailedPrecondition, "task is already in pending state")
	default:
		return errors.E(op, errors.Internal, fmt.Sprintf("unexpected return value from Lua script %d", n))
	}
}
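
// Illustrative usage (a sketch, not part of this file): forcing a scheduled or
// retry task to run now, given its queue name and a task ID string id.
//
//	if err := r.RunTask("default", id); err != nil {
//		// NotFound if the task doesn't exist; FailedPrecondition if it is
//		// already pending or active.
//		return err
//	}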

// runAllCmd is a Lua script that moves all tasks in the given state
// (one of: scheduled, retry, archived) to pending state.
//
// Input:
// KEYS[1] -> zset which holds task ids (e.g. asynq:{<qname>}:scheduled)
// KEYS[2] -> asynq:{<qname>}:pending
// --
// ARGV[1] -> task key prefix
//
// Output:
// integer: number of tasks updated to pending state.
var runAllCmd = redis.NewScript(`
local ids = redis.call("ZRANGE", KEYS[1], 0, -1)
for _, id in ipairs(ids) do
	redis.call("LPUSH", KEYS[2], id)
	redis.call("HSET", ARGV[1] .. id, "state", "pending")
end
redis.call("DEL", KEYS[1])
return table.getn(ids)`)

func (r *RDB) runAll(zset, qname string) (int64, error) {
	if err := r.checkQueueExists(qname); err != nil {
		return 0, err
	}
	keys := []string{
		zset,
		base.PendingKey(qname),
	}
	argv := []interface{}{
		base.TaskKeyPrefix(qname),
	}
	res, err := runAllCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return 0, err
	}
	n, ok := res.(int64)
	if !ok {
		return 0, fmt.Errorf("could not cast %v to int64", res)
	}
	if n == -1 {
		return 0, &errors.QueueNotFoundError{Queue: qname}
	}
	return n, nil
}

// ArchiveAllRetryTasks archives all retry tasks from the given queue and
// returns the number of tasks that were moved.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) ArchiveAllRetryTasks(qname string) (int64, error) {
	var op errors.Op = "rdb.ArchiveAllRetryTasks"
	n, err := r.archiveAll(base.RetryKey(qname), base.ArchivedKey(qname), qname)
	if errors.IsQueueNotFound(err) {
		return 0, errors.E(op, errors.NotFound, err)
	}
	if err != nil {
		return 0, errors.E(op, errors.Internal, err)
	}
	return n, nil
}

// ArchiveAllScheduledTasks archives all scheduled tasks from the given queue and
// returns the number of tasks that were moved.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) ArchiveAllScheduledTasks(qname string) (int64, error) {
	var op errors.Op = "rdb.ArchiveAllScheduledTasks"
	n, err := r.archiveAll(base.ScheduledKey(qname), base.ArchivedKey(qname), qname)
	if errors.IsQueueNotFound(err) {
		return 0, errors.E(op, errors.NotFound, err)
	}
	if err != nil {
		return 0, errors.E(op, errors.Internal, err)
	}
	return n, nil
}

// archiveAllPendingCmd is a Lua script that moves all pending tasks from
// the given queue to archived state.
//
// Input:
// KEYS[1] -> asynq:{<qname>}:pending
// KEYS[2] -> asynq:{<qname>}:archived
// --
// ARGV[1] -> current timestamp
// ARGV[2] -> cutoff timestamp (e.g., 90 days ago)
// ARGV[3] -> max number of tasks in archive (e.g., 100)
// ARGV[4] -> task key prefix (asynq:{<qname>}:t:)
//
// Output:
// integer: Number of tasks archived
var archiveAllPendingCmd = redis.NewScript(`
local ids = redis.call("LRANGE", KEYS[1], 0, -1)
for _, id in ipairs(ids) do
	redis.call("ZADD", KEYS[2], ARGV[1], id)
	redis.call("HSET", ARGV[4] .. id, "state", "archived")
end
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[2])
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[3])
redis.call("DEL", KEYS[1])
return table.getn(ids)`)
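
// After moving the task IDs, the script above prunes the archived set in two
// ways: ZREMRANGEBYSCORE drops entries whose archive timestamp (the zset
// score) is older than the cutoff in ARGV[2], and ZREMRANGEBYRANK drops the
// oldest entries beyond the size cap in ARGV[3], so the archive stays bounded
// in both age and size. The same pruning appears in archiveTaskCmd and
// archiveAllCmd below.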

// ArchiveAllPendingTasks archives all pending tasks from the given queue and
// returns the number of tasks moved.
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
func (r *RDB) ArchiveAllPendingTasks(qname string) (int64, error) {
	var op errors.Op = "rdb.ArchiveAllPendingTasks"
	if err := r.checkQueueExists(qname); err != nil {
		return 0, errors.E(op, errors.CanonicalCode(err), err)
	}
	keys := []string{
		base.PendingKey(qname),
		base.ArchivedKey(qname),
	}
	now := time.Now()
	argv := []interface{}{
		now.Unix(),
		now.AddDate(0, 0, -archivedExpirationInDays).Unix(),
		maxArchiveSize,
		base.TaskKeyPrefix(qname),
	}
	res, err := archiveAllPendingCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return 0, errors.E(op, errors.Internal, err)
	}
	n, ok := res.(int64)
	if !ok {
		return 0, errors.E(op, errors.Internal, fmt.Sprintf("unexpected return value from script %v", res))
	}
	return n, nil
}

// archiveTaskCmd is a Lua script that archives a task given a task id.
//
// Input:
// KEYS[1] -> task key (asynq:{<qname>}:t:<task_id>)
// KEYS[2] -> archived key (asynq:{<qname>}:archived)
// --
// ARGV[1] -> id of the task to archive
// ARGV[2] -> current timestamp
// ARGV[3] -> cutoff timestamp (e.g., 90 days ago)
// ARGV[4] -> max number of tasks in archived state (e.g., 100)
// ARGV[5] -> queue key prefix (asynq:{<qname>}:)
//
// Output:
// Numeric code indicating the status:
// Returns 1 if task is successfully archived.
// Returns 0 if task is not found.
// Returns -1 if task is already archived.
// Returns -2 if task is in active state.
// Returns error reply if unexpected error occurs.
var archiveTaskCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[1]) == 0 then
	return 0
end
local state = redis.call("HGET", KEYS[1], "state")
if state == "active" then
	return -2
end
if state == "archived" then
	return -1
end
if state == "pending" then
	if redis.call("LREM", ARGV[5] .. state, 1, ARGV[1]) == 0 then
		return redis.error_reply("task id not found in list " .. tostring(state))
	end
else
	if redis.call("ZREM", ARGV[5] .. state, ARGV[1]) == 0 then
		return redis.error_reply("task id not found in zset " .. tostring(state))
	end
end
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[1])
redis.call("HSET", KEYS[1], "state", "archived")
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[3])
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[4])
return 1
`)

// ArchiveTask finds a task that matches the id from the given queue and archives it.
// It returns nil if it successfully archived the task.
//
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
// If a task with the given id doesn't exist in the queue, it returns TaskNotFoundError.
// If a task is already archived, it returns TaskAlreadyArchivedError.
// If a task is in active state it returns a non-nil error with FailedPrecondition code.
func (r *RDB) ArchiveTask(qname, id string) error {
	var op errors.Op = "rdb.ArchiveTask"
	if err := r.checkQueueExists(qname); err != nil {
		return errors.E(op, errors.CanonicalCode(err), err)
	}
	keys := []string{
		base.TaskKey(qname, id),
		base.ArchivedKey(qname),
	}
	now := time.Now()
	argv := []interface{}{
		id,
		now.Unix(),
		now.AddDate(0, 0, -archivedExpirationInDays).Unix(),
		maxArchiveSize,
		base.QueueKeyPrefix(qname),
	}
	res, err := archiveTaskCmd.Run(context.Background(), r.client, keys, argv...).Result()
	if err != nil {
		return errors.E(op, errors.Unknown, err)
	}
	n, ok := res.(int64)
	if !ok {
		return errors.E(op, errors.Internal, fmt.Sprintf("could not cast the return value %v from archiveTaskCmd to int64.", res))
	}
	switch n {
	case 1:
		return nil
	case 0:
		return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id})
	case -1:
		return errors.E(op, errors.FailedPrecondition, &errors.TaskAlreadyArchivedError{Queue: qname, ID: id})
	case -2:
		return errors.E(op, errors.FailedPrecondition, "cannot archive task in active state. use CancelTask instead.")
	case -3:
		return errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
	default:
		return errors.E(op, errors.Internal, fmt.Sprintf("unexpected return value from archiveTaskCmd script: %d", n))
	}
}

// archiveAllCmd is a Lua script that archives all tasks in either scheduled
// or retry state from the given queue.
//
// Input:
// KEYS[1] -> ZSET to move task from (e.g., asynq:{<qname>}:retry)
// KEYS[2] -> asynq:{<qname>}:archived
// --
// ARGV[1] -> current timestamp
// ARGV[2] -> cutoff timestamp (e.g., 90 days ago)
// ARGV[3] -> max number of tasks in archive (e.g., 100)
// ARGV[4] -> task key prefix (asynq:{<qname>}:t:)
//
// Output:
// integer: number of tasks archived
var archiveAllCmd = redis.NewScript(`
local ids = redis.call("ZRANGE", KEYS[1], 0, -1)
for _, id in ipairs(ids) do
	redis.call("ZADD", KEYS[2], ARGV[1], id)
	redis.call("HSET", ARGV[4] .. id, "state", "archived")
end
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[2])
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[3])
redis.call("DEL", KEYS[1])
return table.getn(ids)`)
|
2020-02-09 03:06:14 +08:00
|
|
|
|
2021-05-04 11:07:00 +08:00
|
|
|
func (r *RDB) archiveAll(src, dst, qname string) (int64, error) {
|
2021-05-23 08:46:23 +08:00
|
|
|
if err := r.checkQueueExists(qname); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
2021-05-08 07:06:07 +08:00
|
|
|
keys := []string{
|
|
|
|
src,
|
|
|
|
dst,
|
|
|
|
}
|
2019-12-27 22:22:33 +08:00
|
|
|
now := time.Now()
|
2021-03-13 08:23:08 +08:00
|
|
|
argv := []interface{}{
|
|
|
|
now.Unix(),
|
|
|
|
now.AddDate(0, 0, -archivedExpirationInDays).Unix(),
|
|
|
|
maxArchiveSize,
|
2021-05-04 11:07:00 +08:00
|
|
|
base.TaskKeyPrefix(qname),
|
2021-05-08 07:06:07 +08:00
|
|
|
qname,
|
2021-03-13 08:23:08 +08:00
|
|
|
}
|
2021-09-02 20:56:02 +08:00
|
|
|
res, err := archiveAllCmd.Run(context.Background(), r.client, keys, argv...).Result()
|
2019-12-27 22:22:33 +08:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
n, ok := res.(int64)
|
|
|
|
if !ok {
|
2021-05-04 11:07:00 +08:00
|
|
|
return 0, fmt.Errorf("unexpected return value from script: %v", res)
|
2019-12-27 22:22:33 +08:00
|
|
|
}
|
2021-05-08 07:06:07 +08:00
|
|
|
if n == -1 {
|
|
|
|
return 0, &errors.QueueNotFoundError{Queue: qname}
|
|
|
|
}
|
2019-12-27 22:22:33 +08:00
|
|
|
return n, nil
|
2019-12-11 12:28:31 +08:00
|
|
|
}
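// A minimal sketch of how archiveAll is driven: move every task in a queue's
// retry set into its archived set. The exported ArchiveAll* wrappers defined
// earlier in this file presumably follow the same shape; this helper is
// illustrative only.
func exampleArchiveAllRetry(r *RDB, qname string) (int64, error) {
	return r.archiveAll(base.RetryKey(qname), base.ArchivedKey(qname), qname)
}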
|
2019-12-12 11:56:19 +08:00
|
|
|
|
2021-05-06 07:00:40 +08:00
|
|
|
// Input:
|
|
|
|
// KEYS[1] -> asynq:{<qname>}:t:<task_id>
|
2021-05-23 08:46:23 +08:00
|
|
|
// --
|
2021-03-13 08:23:08 +08:00
|
|
|
// ARGV[1] -> task ID
|
2021-05-06 07:00:40 +08:00
|
|
|
// ARGV[2] -> queue key prefix
|
|
|
|
//
|
|
|
|
// Output:
|
|
|
|
// Numeric code indicating the status:
|
|
|
|
// Returns 1 if task is successfully deleted.
|
|
|
|
// Returns 0 if task is not found.
|
2021-05-23 08:46:23 +08:00
|
|
|
// Returns -1 if task is in active state.
|
2021-05-06 07:00:40 +08:00
|
|
|
var deleteTaskCmd = redis.NewScript(`
|
|
|
|
if redis.call("EXISTS", KEYS[1]) == 0 then
|
2021-03-13 08:23:08 +08:00
|
|
|
return 0
|
|
|
|
end
|
2021-05-06 07:00:40 +08:00
|
|
|
local state = redis.call("HGET", KEYS[1], "state")
|
|
|
|
if state == "active" then
|
2021-05-23 08:46:23 +08:00
|
|
|
return -1
|
2021-05-06 07:00:40 +08:00
|
|
|
end
|
|
|
|
if state == "pending" then
|
|
|
|
if redis.call("LREM", ARGV[2] .. state, 0, ARGV[1]) == 0 then
|
|
|
|
return redis.error_reply("task is not found in list: " .. tostring(state))
|
|
|
|
end
|
|
|
|
else
|
|
|
|
if redis.call("ZREM", ARGV[2] .. state, ARGV[1]) == 0 then
|
|
|
|
return redis.error_reply("task is not found in zset: " .. tostring(state))
|
|
|
|
end
|
|
|
|
end
|
2021-06-09 21:06:43 +08:00
|
|
|
local unique_key = redis.call("HGET", KEYS[1], "unique_key")
|
2021-06-25 21:37:58 +08:00
|
|
|
if unique_key and unique_key ~= "" and redis.call("GET", unique_key) == ARGV[1] then
|
2021-06-09 21:06:43 +08:00
|
|
|
redis.call("DEL", unique_key)
|
|
|
|
end
|
2021-05-06 07:00:40 +08:00
|
|
|
return redis.call("DEL", KEYS[1])
|
2021-03-13 08:23:08 +08:00
|
|
|
`)
|
|
|
|
|
2021-05-06 07:00:40 +08:00
|
|
|
// DeleteTask finds a task that matches the id from the given queue and deletes it.
|
|
|
|
// It returns nil if it successfully deleted the task.
|
|
|
|
//
|
|
|
|
// If a queue with the given name doesn't exist, it returns QueueNotFoundError.
|
|
|
|
// If a task with the given id doesn't exist in the queue, it returns TaskNotFoundError.
|
|
|
|
// If the task is in active state, it returns a non-nil error with FailedPrecondition code.
|
2021-09-10 21:29:37 +08:00
|
|
|
func (r *RDB) DeleteTask(qname, id string) error {
|
2021-05-06 07:00:40 +08:00
|
|
|
var op errors.Op = "rdb.DeleteTask"
|
2021-05-23 08:46:23 +08:00
|
|
|
if err := r.checkQueueExists(qname); err != nil {
|
|
|
|
return errors.E(op, errors.CanonicalCode(err), err)
|
|
|
|
}
|
2021-05-06 07:00:40 +08:00
|
|
|
keys := []string{
|
2021-09-10 21:29:37 +08:00
|
|
|
base.TaskKey(qname, id),
|
2021-01-21 07:03:34 +08:00
|
|
|
}
|
2021-05-06 07:00:40 +08:00
|
|
|
argv := []interface{}{
|
2021-09-10 21:29:37 +08:00
|
|
|
id,
|
2021-05-06 07:00:40 +08:00
|
|
|
base.QueueKeyPrefix(qname),
|
2021-03-13 08:23:08 +08:00
|
|
|
}
|
2021-09-02 20:56:02 +08:00
|
|
|
res, err := deleteTaskCmd.Run(context.Background(), r.client, keys, argv...).Result()
|
2019-12-12 11:56:19 +08:00
|
|
|
if err != nil {
|
2021-05-06 07:00:40 +08:00
|
|
|
return errors.E(op, errors.Unknown, err)
|
2019-12-12 11:56:19 +08:00
|
|
|
}
|
|
|
|
n, ok := res.(int64)
|
|
|
|
if !ok {
|
2021-05-06 07:00:40 +08:00
|
|
|
return errors.E(op, errors.Internal, fmt.Sprintf("cast error: deleteTaskCmd script returned unexported value %v", res))
|
2019-12-12 11:56:19 +08:00
|
|
|
}
|
2021-05-06 07:00:40 +08:00
|
|
|
switch n {
|
|
|
|
case 1:
|
|
|
|
return nil
|
|
|
|
case 0:
|
2021-09-10 21:29:37 +08:00
|
|
|
return errors.E(op, errors.NotFound, &errors.TaskNotFoundError{Queue: qname, ID: id})
|
2021-05-06 07:00:40 +08:00
|
|
|
case -1:
|
|
|
|
return errors.E(op, errors.FailedPrecondition, "cannot delete task in active state. use CancelTask instead.")
|
|
|
|
default:
|
|
|
|
return errors.E(op, errors.Internal, fmt.Sprintf("unexpected return value from deleteTaskCmd script: %d", n))
|
2019-12-12 11:56:19 +08:00
|
|
|
}
|
|
|
|
}
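// Usage sketch for DeleteTask (illustrative only): callers typically branch on
// the canonical error code rather than on the concrete error types.
func exampleDeleteTaskUsage(r *RDB, qname, id string) error {
	switch err := r.DeleteTask(qname, id); {
	case err == nil:
		return nil // task hash, queue membership, and unique lock removed
	case errors.CanonicalCode(err) == errors.NotFound:
		return nil // queue or task already gone; treat as success
	case errors.CanonicalCode(err) == errors.FailedPrecondition:
		return fmt.Errorf("task %s is still active; cancel it first: %w", id, err)
	default:
		return err
	}
}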
|
2019-12-12 22:38:01 +08:00
|
|
|
|
2021-01-13 03:01:21 +08:00
|
|
|
// DeleteAllArchivedTasks deletes all archived tasks from the given queue
|
2020-07-13 21:29:41 +08:00
|
|
|
// and returns the number of tasks deleted.
|
2021-01-13 03:01:21 +08:00
|
|
|
func (r *RDB) DeleteAllArchivedTasks(qname string) (int64, error) {
|
2021-05-08 07:48:36 +08:00
|
|
|
var op errors.Op = "rdb.DeleteAllArchivedTasks"
|
|
|
|
n, err := r.deleteAll(base.ArchivedKey(qname), qname)
|
|
|
|
if errors.IsQueueNotFound(err) {
|
|
|
|
return 0, errors.E(op, errors.NotFound, err)
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return 0, errors.E(op, errors.Unknown, err)
|
|
|
|
}
|
|
|
|
return n, nil
|
2020-07-13 21:29:41 +08:00
|
|
|
}
|
|
|
|
|
2020-08-16 04:04:26 +08:00
|
|
|
// DeleteAllRetryTasks deletes all retry tasks from the given queue
|
2020-07-13 21:29:41 +08:00
|
|
|
// and returns the number of tasks deleted.
|
2020-08-16 04:04:26 +08:00
|
|
|
func (r *RDB) DeleteAllRetryTasks(qname string) (int64, error) {
|
2021-05-08 07:48:36 +08:00
|
|
|
var op errors.Op = "rdb.DeleteAllRetryTasks"
|
|
|
|
n, err := r.deleteAll(base.RetryKey(qname), qname)
|
|
|
|
if errors.IsQueueNotFound(err) {
|
|
|
|
return 0, errors.E(op, errors.NotFound, err)
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return 0, errors.E(op, errors.Unknown, err)
|
|
|
|
}
|
|
|
|
return n, nil
|
2019-12-12 22:38:01 +08:00
|
|
|
}
|
|
|
|
|
2020-08-16 04:04:26 +08:00
|
|
|
// DeleteAllScheduledTasks deletes all scheduled tasks from the given queue
|
2020-07-13 21:29:41 +08:00
|
|
|
// and returns the number of tasks deleted.
|
2020-08-16 04:04:26 +08:00
|
|
|
func (r *RDB) DeleteAllScheduledTasks(qname string) (int64, error) {
|
2021-05-08 07:48:36 +08:00
|
|
|
var op errors.Op = "rdb.DeleteAllScheduledTasks"
|
|
|
|
n, err := r.deleteAll(base.ScheduledKey(qname), qname)
|
|
|
|
if errors.IsQueueNotFound(err) {
|
|
|
|
return 0, errors.E(op, errors.NotFound, err)
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return 0, errors.E(op, errors.Unknown, err)
|
|
|
|
}
|
|
|
|
return n, nil
|
2019-12-12 22:38:01 +08:00
|
|
|
}
|
|
|
|
|
2021-11-06 07:52:54 +08:00
|
|
|
// DeleteAllCompletedTasks deletes all completed tasks from the given queue
|
|
|
|
// and returns the number of tasks deleted.
|
|
|
|
func (r *RDB) DeleteAllCompletedTasks(qname string) (int64, error) {
|
|
|
|
var op errors.Op = "rdb.DeleteAllCompletedTasks"
|
|
|
|
n, err := r.deleteAll(base.CompletedKey(qname), qname)
|
|
|
|
if errors.IsQueueNotFound(err) {
|
|
|
|
return 0, errors.E(op, errors.NotFound, err)
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return 0, errors.E(op, errors.Unknown, err)
|
|
|
|
}
|
|
|
|
return n, nil
|
|
|
|
}
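// The DeleteAll* wrappers above share the same contract; a caller-side sketch
// that drains the archived set of several queues (queue names supplied by the
// caller; illustrative only):
func exampleDrainArchived(r *RDB, qnames []string) (int64, error) {
	var total int64
	for _, qname := range qnames {
		n, err := r.DeleteAllArchivedTasks(qname)
		if err != nil && errors.CanonicalCode(err) != errors.NotFound {
			return total, err
		}
		total += n
	}
	return total, nil
}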
|
|
|
|
|
2021-05-08 07:48:36 +08:00
|
|
|
// deleteAllCmd deletes tasks from the given zset.
|
|
|
|
//
|
|
|
|
// Input:
|
|
|
|
// KEYS[1] -> zset holding the task ids.
|
|
|
|
// --
|
|
|
|
// ARGV[1] -> task key prefix
|
|
|
|
//
|
|
|
|
// Output:
|
|
|
|
// integer: number of tasks deleted
|
|
|
|
var deleteAllCmd = redis.NewScript(`
|
|
|
|
local ids = redis.call("ZRANGE", KEYS[1], 0, -1)
|
|
|
|
for _, id in ipairs(ids) do
|
2021-06-09 21:06:43 +08:00
|
|
|
local task_key = ARGV[1] .. id
|
|
|
|
local unique_key = redis.call("HGET", task_key, "unique_key")
|
2021-06-25 21:37:58 +08:00
|
|
|
if unique_key and unique_key ~= "" and redis.call("GET", unique_key) == id then
|
2021-06-09 21:06:43 +08:00
|
|
|
redis.call("DEL", unique_key)
|
|
|
|
end
|
|
|
|
redis.call("DEL", task_key)
|
2021-05-08 07:48:36 +08:00
|
|
|
end
|
|
|
|
redis.call("DEL", KEYS[1])
|
|
|
|
return table.getn(ids)`)
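// The unique-lock cleanup inside deleteAllCmd (and deleteTaskCmd above) reads
// most easily as the following non-atomic Go sketch; the Lua scripts perform
// the same steps atomically on the server. The parameter names here are
// illustrative only.
func exampleDeleteWithUniqueLock(ctx context.Context, c redis.UniversalClient, taskKey, id string) error {
	uniqueKey, err := c.HGet(ctx, taskKey, "unique_key").Result()
	if err != nil && err != redis.Nil {
		return err
	}
	if uniqueKey != "" {
		owner, err := c.Get(ctx, uniqueKey).Result()
		if err != nil && err != redis.Nil {
			return err
		}
		if owner == id {
			// Release the unique lock only if this task still owns it.
			if err := c.Del(ctx, uniqueKey).Err(); err != nil {
				return err
			}
		}
	}
	// Finally remove the task hash itself.
	return c.Del(ctx, taskKey).Err()
}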
|
|
|
|
|
2021-03-13 08:23:08 +08:00
|
|
|
func (r *RDB) deleteAll(key, qname string) (int64, error) {
|
2021-05-23 08:46:23 +08:00
|
|
|
if err := r.checkQueueExists(qname); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
2021-05-08 07:48:36 +08:00
|
|
|
argv := []interface{}{
|
|
|
|
base.TaskKeyPrefix(qname),
|
|
|
|
qname,
|
|
|
|
}
|
2021-09-02 20:56:02 +08:00
|
|
|
res, err := deleteAllCmd.Run(context.Background(), r.client, []string{key}, argv...).Result()
|
2020-07-13 21:29:41 +08:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
n, ok := res.(int64)
|
|
|
|
if !ok {
|
2021-05-08 07:48:36 +08:00
|
|
|
return 0, fmt.Errorf("unexpected return value from Lua script: %v", res)
|
|
|
|
}
|
2020-07-13 21:29:41 +08:00
|
|
|
return n, nil
|
2019-12-12 22:38:01 +08:00
|
|
|
}
|
2020-01-13 22:50:03 +08:00
|
|
|
|
2021-05-08 07:48:36 +08:00
|
|
|
// deleteAllPendingCmd deletes all pending tasks from the given queue.
|
|
|
|
//
|
|
|
|
// Input:
|
2021-03-13 08:23:08 +08:00
|
|
|
// KEYS[1] -> asynq:{<qname>}:pending
|
2021-05-08 07:48:36 +08:00
|
|
|
// --
|
2021-03-13 08:23:08 +08:00
|
|
|
// ARGV[1] -> task key prefix
|
2021-05-08 07:48:36 +08:00
|
|
|
//
|
|
|
|
// Output:
|
|
|
|
// integer: number of tasks deleted
|
2021-01-21 07:03:34 +08:00
|
|
|
var deleteAllPendingCmd = redis.NewScript(`
|
2021-03-13 08:23:08 +08:00
|
|
|
local ids = redis.call("LRANGE", KEYS[1], 0, -1)
|
|
|
|
for _, id in ipairs(ids) do
|
2021-05-08 07:48:36 +08:00
|
|
|
redis.call("DEL", ARGV[1] .. id)
|
2021-06-06 21:35:36 +08:00
|
|
|
end
|
2021-01-21 07:03:34 +08:00
|
|
|
redis.call("DEL", KEYS[1])
|
2021-03-13 08:23:08 +08:00
|
|
|
return table.getn(ids)`)
|
2021-01-21 07:03:34 +08:00
|
|
|
|
|
|
|
// DeleteAllPendingTasks deletes all pending tasks from the given queue
|
|
|
|
// and returns the number of tasks deleted.
|
|
|
|
func (r *RDB) DeleteAllPendingTasks(qname string) (int64, error) {
|
2021-05-08 07:48:36 +08:00
|
|
|
var op errors.Op = "rdb.DeleteAllPendingTasks"
|
2021-05-23 08:46:23 +08:00
|
|
|
if err := r.checkQueueExists(qname); err != nil {
|
|
|
|
return 0, errors.E(op, errors.CanonicalCode(err), err)
|
|
|
|
}
|
2021-05-08 07:48:36 +08:00
|
|
|
keys := []string{
|
|
|
|
base.PendingKey(qname),
|
|
|
|
}
|
|
|
|
argv := []interface{}{
|
|
|
|
base.TaskKeyPrefix(qname),
|
|
|
|
}
|
2021-09-02 20:56:02 +08:00
|
|
|
res, err := deleteAllPendingCmd.Run(context.Background(), r.client, keys, argv...).Result()
|
2021-01-21 07:03:34 +08:00
|
|
|
if err != nil {
|
2021-05-08 07:48:36 +08:00
|
|
|
return 0, errors.E(op, errors.Unknown, err)
|
2021-01-21 07:03:34 +08:00
|
|
|
}
|
|
|
|
n, ok := res.(int64)
|
|
|
|
if !ok {
|
2021-05-08 07:48:36 +08:00
|
|
|
return 0, errors.E(op, errors.Internal, fmt.Sprintf("command error: unexpected return value %v", res))
|
|
|
|
}
|
2021-01-21 07:03:34 +08:00
|
|
|
return n, nil
|
|
|
|
}
|
|
|
|
|
2021-05-09 21:48:44 +08:00
|
|
|
// removeQueueForceCmd removes the given queue regardless of
|
|
|
|
// whether the queue is empty.
|
|
|
|
// It only checks that the active list is empty before removing.
|
|
|
|
//
|
|
|
|
// Input:
|
2020-08-20 21:59:10 +08:00
|
|
|
// KEYS[1] -> asynq:{<qname>}:pending
|
2020-09-06 03:43:15 +08:00
|
|
|
// KEYS[2] -> asynq:{<qname>}:active
|
2020-08-20 21:59:10 +08:00
|
|
|
// KEYS[3] -> asynq:{<qname>}:scheduled
|
|
|
|
// KEYS[4] -> asynq:{<qname>}:retry
|
2021-01-13 03:01:21 +08:00
|
|
|
// KEYS[5] -> asynq:{<qname>}:archived
|
2020-08-20 21:59:10 +08:00
|
|
|
// KEYS[6] -> asynq:{<qname>}:deadlines
|
2021-05-09 21:48:44 +08:00
|
|
|
// --
|
2021-03-13 08:23:08 +08:00
|
|
|
// ARGV[1] -> task key prefix
|
2021-05-09 21:48:44 +08:00
|
|
|
//
|
|
|
|
// Output:
|
|
|
|
// Numeric code to indicate the status.
|
|
|
|
// Returns 1 if successfully removed.
|
|
|
|
// Returns -2 if the queue has active tasks.
|
2020-02-09 03:06:14 +08:00
|
|
|
var removeQueueForceCmd = redis.NewScript(`
|
2020-09-06 03:43:15 +08:00
|
|
|
local active = redis.call("LLEN", KEYS[2])
|
|
|
|
if active > 0 then
|
2021-05-09 21:48:44 +08:00
|
|
|
return -2
|
2020-02-09 03:06:14 +08:00
|
|
|
end
|
2021-03-13 08:23:08 +08:00
|
|
|
for _, id in ipairs(redis.call("LRANGE", KEYS[1], 0, -1)) do
|
|
|
|
redis.call("DEL", ARGV[1] .. id)
|
|
|
|
end
|
|
|
|
for _, id in ipairs(redis.call("LRANGE", KEYS[2], 0, -1)) do
|
|
|
|
redis.call("DEL", ARGV[1] .. id)
|
|
|
|
end
|
|
|
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[3], 0, -1)) do
|
|
|
|
redis.call("DEL", ARGV[1] .. id)
|
|
|
|
end
|
|
|
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[4], 0, -1)) do
|
|
|
|
redis.call("DEL", ARGV[1] .. id)
|
|
|
|
end
|
|
|
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[5], 0, -1)) do
|
|
|
|
redis.call("DEL", ARGV[1] .. id)
|
|
|
|
end
|
2020-08-20 21:59:10 +08:00
|
|
|
redis.call("DEL", KEYS[1])
|
2020-02-09 03:06:14 +08:00
|
|
|
redis.call("DEL", KEYS[2])
|
2020-08-20 21:59:10 +08:00
|
|
|
redis.call("DEL", KEYS[3])
|
|
|
|
redis.call("DEL", KEYS[4])
|
|
|
|
redis.call("DEL", KEYS[5])
|
|
|
|
redis.call("DEL", KEYS[6])
|
2021-05-09 21:48:44 +08:00
|
|
|
return 1`)
|
2020-02-09 03:06:14 +08:00
|
|
|
|
2021-05-09 21:48:44 +08:00
|
|
|
// removeQueueCmd removes the given queue.
|
|
|
|
// It checks whether the queue is empty before removing.
|
|
|
|
//
|
|
|
|
// Input:
|
2021-03-13 08:23:08 +08:00
|
|
|
// KEYS[1] -> asynq:{<qname>}:pending
|
2020-09-06 03:43:15 +08:00
|
|
|
// KEYS[2] -> asynq:{<qname>}:active
|
2020-08-20 21:59:10 +08:00
|
|
|
// KEYS[3] -> asynq:{<qname>}:scheduled
|
|
|
|
// KEYS[4] -> asynq:{<qname>}:retry
|
2021-01-13 03:01:21 +08:00
|
|
|
// KEYS[5] -> asynq:{<qname>}:archived
|
2020-08-20 21:59:10 +08:00
|
|
|
// KEYS[6] -> asynq:{<qname>}:deadlines
|
2021-05-09 21:48:44 +08:00
|
|
|
// --
|
2021-03-13 08:23:08 +08:00
|
|
|
// ARGV[1] -> task key prefix
|
2021-05-09 21:48:44 +08:00
|
|
|
//
|
|
|
|
// Output:
|
|
|
|
// Numeric code to indicate the status
|
|
|
|
// Returns 1 if successfully removed.
|
|
|
|
// Returns -1 if the queue is not empty.
|
2020-02-09 03:06:14 +08:00
|
|
|
var removeQueueCmd = redis.NewScript(`
|
2021-03-13 08:23:08 +08:00
|
|
|
local ids = {}
|
|
|
|
for _, id in ipairs(redis.call("LRANGE", KEYS[1], 0, -1)) do
|
|
|
|
table.insert(ids, id)
|
|
|
|
end
|
|
|
|
for _, id in ipairs(redis.call("LRANGE", KEYS[2], 0, -1)) do
|
|
|
|
table.insert(ids, id)
|
|
|
|
end
|
|
|
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[3], 0, -1)) do
|
|
|
|
table.insert(ids, id)
|
|
|
|
end
|
|
|
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[4], 0, -1)) do
|
|
|
|
table.insert(ids, id)
|
|
|
|
end
|
|
|
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[5], 0, -1)) do
|
|
|
|
table.insert(ids, id)
|
|
|
|
end
|
|
|
|
if table.getn(ids) > 0 then
|
2021-05-09 21:48:44 +08:00
|
|
|
return -1
|
2020-02-09 03:06:14 +08:00
|
|
|
end
|
2021-03-13 08:23:08 +08:00
|
|
|
for _, id in ipairs(ids) do
|
|
|
|
redis.call("DEL", ARGV[1] .. id)
|
|
|
|
end
|
2020-08-20 21:59:10 +08:00
|
|
|
redis.call("DEL", KEYS[1])
|
2020-02-09 03:06:14 +08:00
|
|
|
redis.call("DEL", KEYS[2])
|
2020-08-20 21:59:10 +08:00
|
|
|
redis.call("DEL", KEYS[3])
|
|
|
|
redis.call("DEL", KEYS[4])
|
|
|
|
redis.call("DEL", KEYS[5])
|
|
|
|
redis.call("DEL", KEYS[6])
|
2021-05-09 21:48:44 +08:00
|
|
|
return 1`)
|
2020-02-09 03:06:14 +08:00
|
|
|
|
2020-01-13 23:03:07 +08:00
|
|
|
// RemoveQueue removes the specified queue.
|
|
|
|
//
|
|
|
|
// If force is set to true, it will remove the queue regardless of its size,
|
2020-09-06 03:43:15 +08:00
|
|
|
// as long as no tasks are active in the queue.
|
2020-01-13 23:03:07 +08:00
|
|
|
// If force is set to false, it will only remove the queue if
|
2020-08-20 21:59:10 +08:00
|
|
|
// the queue is empty.
|
2020-01-13 23:03:07 +08:00
|
|
|
func (r *RDB) RemoveQueue(qname string, force bool) error {
|
2021-05-09 21:48:44 +08:00
|
|
|
var op errors.Op = "rdb.RemoveQueue"
|
2021-11-06 07:52:54 +08:00
|
|
|
exists, err := r.queueExists(qname)
|
2020-08-20 21:59:10 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !exists {
|
2021-05-09 21:48:44 +08:00
|
|
|
return errors.E(op, errors.NotFound, &errors.QueueNotFoundError{Queue: qname})
|
2020-08-20 21:59:10 +08:00
|
|
|
}
|
2020-01-13 23:03:07 +08:00
|
|
|
var script *redis.Script
|
|
|
|
if force {
|
2020-02-09 03:06:14 +08:00
|
|
|
script = removeQueueForceCmd
|
2020-01-13 23:03:07 +08:00
|
|
|
} else {
|
2020-02-09 03:06:14 +08:00
|
|
|
script = removeQueueCmd
|
2020-01-13 23:03:07 +08:00
|
|
|
}
|
2020-08-20 21:59:10 +08:00
|
|
|
keys := []string{
|
2021-03-13 08:23:08 +08:00
|
|
|
base.PendingKey(qname),
|
2020-09-06 03:43:15 +08:00
|
|
|
base.ActiveKey(qname),
|
2020-08-20 21:59:10 +08:00
|
|
|
base.ScheduledKey(qname),
|
|
|
|
base.RetryKey(qname),
|
2021-01-13 03:01:21 +08:00
|
|
|
base.ArchivedKey(qname),
|
2020-08-20 21:59:10 +08:00
|
|
|
base.DeadlinesKey(qname),
|
2020-01-13 23:03:07 +08:00
|
|
|
}
|
2021-09-02 20:56:02 +08:00
|
|
|
res, err := script.Run(context.Background(), r.client, keys, base.TaskKeyPrefix(qname)).Result()
|
2021-05-09 21:48:44 +08:00
|
|
|
if err != nil {
|
|
|
|
return errors.E(op, errors.Unknown, err)
|
|
|
|
}
|
|
|
|
n, ok := res.(int64)
|
|
|
|
if !ok {
|
|
|
|
return errors.E(op, errors.Internal, fmt.Sprintf("unexpeced return value from Lua script: %v", res))
|
|
|
|
}
|
|
|
|
switch n {
|
|
|
|
case 1:
|
2021-09-02 20:56:02 +08:00
|
|
|
if err := r.client.SRem(context.Background(), base.AllQueues, qname).Err(); err != nil {
|
2021-05-09 21:48:44 +08:00
|
|
|
return errors.E(op, errors.Unknown, err)
|
2020-08-21 21:00:49 +08:00
|
|
|
}
|
2021-05-09 21:48:44 +08:00
|
|
|
return nil
|
|
|
|
case -1:
|
|
|
|
return errors.E(op, errors.NotFound, &errors.QueueNotEmptyError{Queue: qname})
|
|
|
|
case -2:
|
|
|
|
return errors.E(op, errors.FailedPrecondition, "cannot remove queue with active tasks")
|
|
|
|
default:
|
|
|
|
return errors.E(op, errors.Unknown, fmt.Sprintf("unexpected return value from Lua script: %d", n))
|
2020-08-20 21:59:10 +08:00
|
|
|
}
|
2020-01-13 22:50:03 +08:00
|
|
|
}
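// RemoveQueue usage sketch (illustrative only): a FailedPrecondition code
// means workers are still processing tasks from the queue.
func exampleRemoveQueueUsage(r *RDB, qname string, force bool) error {
	if err := r.RemoveQueue(qname, force); err != nil {
		if errors.CanonicalCode(err) == errors.FailedPrecondition {
			return fmt.Errorf("queue %q has active tasks; stop its workers first: %w", qname, err)
		}
		return err
	}
	return nil
}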
|
2020-02-02 14:22:48 +08:00
|
|
|
|
2020-02-09 03:06:14 +08:00
|
|
|
// Note: Script also removes stale keys.
|
2020-08-18 21:30:15 +08:00
|
|
|
var listServerKeysCmd = redis.NewScript(`
|
2020-02-09 03:06:14 +08:00
|
|
|
local now = tonumber(ARGV[1])
|
|
|
|
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
|
|
|
|
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
|
2020-08-18 21:30:15 +08:00
|
|
|
return keys`)
|
2020-02-09 03:06:14 +08:00
|
|
|
|
2020-04-13 23:14:55 +08:00
|
|
|
// ListServers returns the list of server info.
|
2020-04-13 08:09:58 +08:00
|
|
|
func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
|
2020-09-27 08:33:29 +08:00
|
|
|
now := time.Now()
|
2021-09-02 20:56:02 +08:00
|
|
|
res, err := listServerKeysCmd.Run(context.Background(), r.client, []string{base.AllServers}, now.Unix()).Result()
|
2020-02-02 14:22:48 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-08-18 21:30:15 +08:00
|
|
|
keys, err := cast.ToStringSliceE(res)
|
2020-02-02 14:22:48 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-04-13 08:09:58 +08:00
|
|
|
var servers []*base.ServerInfo
|
2020-08-18 21:30:15 +08:00
|
|
|
for _, key := range keys {
|
2021-09-02 20:56:02 +08:00
|
|
|
data, err := r.client.Get(context.Background(), key).Result()
|
2020-02-02 14:22:48 +08:00
|
|
|
if err != nil {
|
|
|
|
continue // skip bad data
|
|
|
|
}
|
2021-03-13 08:23:08 +08:00
|
|
|
info, err := base.DecodeServerInfo([]byte(data))
|
|
|
|
if err != nil {
|
2020-08-18 21:30:15 +08:00
|
|
|
continue // skip bad data
|
|
|
|
}
|
2021-03-13 08:23:08 +08:00
|
|
|
servers = append(servers, info)
|
2020-02-02 14:22:48 +08:00
|
|
|
}
|
2020-04-13 08:09:58 +08:00
|
|
|
return servers, nil
|
2020-02-02 14:22:48 +08:00
|
|
|
}
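// ListServers usage sketch (illustrative only); entries are printed with %+v
// to avoid assuming the base.ServerInfo field layout here.
func exampleListServersUsage(r *RDB) error {
	servers, err := r.ListServers()
	if err != nil {
		return err
	}
	for _, info := range servers {
		fmt.Printf("server: %+v\n", info)
	}
	return nil
}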
|
2020-02-23 12:42:53 +08:00
|
|
|
|
|
|
|
// Note: Script also removes stale keys.
|
2021-03-13 08:23:08 +08:00
|
|
|
var listWorkersCmd = redis.NewScript(`
|
2020-02-23 12:42:53 +08:00
|
|
|
local now = tonumber(ARGV[1])
|
|
|
|
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
|
|
|
|
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
|
2021-06-14 06:03:46 +08:00
|
|
|
return keys`)
|
2020-02-23 12:42:53 +08:00
|
|
|
|
|
|
|
// ListWorkers returns the list of worker stats.
|
|
|
|
func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
2021-06-14 06:03:46 +08:00
|
|
|
var op errors.Op = "rdb.ListWorkers"
|
2020-09-27 08:33:29 +08:00
|
|
|
now := time.Now()
|
2021-09-02 20:56:02 +08:00
|
|
|
res, err := listWorkersCmd.Run(context.Background(), r.client, []string{base.AllWorkers}, now.Unix()).Result()
|
2020-02-23 12:42:53 +08:00
|
|
|
if err != nil {
|
2021-06-14 06:03:46 +08:00
|
|
|
return nil, errors.E(op, errors.Unknown, err)
|
2020-02-23 12:42:53 +08:00
|
|
|
}
|
2021-06-14 06:03:46 +08:00
|
|
|
keys, err := cast.ToStringSliceE(res)
|
2020-02-23 12:42:53 +08:00
|
|
|
if err != nil {
|
2021-06-14 06:03:46 +08:00
|
|
|
return nil, errors.E(op, errors.Internal, fmt.Sprintf("unexpected return value from Lua script: %v", res))
|
2020-02-23 12:42:53 +08:00
|
|
|
}
|
|
|
|
var workers []*base.WorkerInfo
|
2021-06-14 06:03:46 +08:00
|
|
|
for _, key := range keys {
|
2021-09-02 20:56:02 +08:00
|
|
|
data, err := r.client.HVals(context.Background(), key).Result()
|
2020-02-23 12:42:53 +08:00
|
|
|
if err != nil {
|
|
|
|
continue // skip bad data
|
|
|
|
}
|
2021-06-14 06:03:46 +08:00
|
|
|
for _, s := range data {
|
|
|
|
w, err := base.DecodeWorkerInfo([]byte(s))
|
|
|
|
if err != nil {
|
|
|
|
continue // skip bad data
|
|
|
|
}
|
|
|
|
workers = append(workers, w)
|
|
|
|
}
|
2020-02-23 12:42:53 +08:00
|
|
|
}
|
|
|
|
return workers, nil
|
|
|
|
}
|
2020-06-03 21:44:12 +08:00
|
|
|
|
2020-09-27 08:33:29 +08:00
|
|
|
// Note: Script also removes stale keys.
|
|
|
|
var listSchedulerKeysCmd = redis.NewScript(`
|
|
|
|
local now = tonumber(ARGV[1])
|
|
|
|
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
|
|
|
|
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
|
|
|
|
return keys`)
|
|
|
|
|
|
|
|
// ListSchedulerEntries returns the list of scheduler entries.
|
|
|
|
func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
|
|
|
|
now := time.Now()
|
2021-09-02 20:56:02 +08:00
|
|
|
res, err := listSchedulerKeysCmd.Run(context.Background(), r.client, []string{base.AllSchedulers}, now.Unix()).Result()
|
2020-09-27 08:33:29 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
keys, err := cast.ToStringSliceE(res)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
var entries []*base.SchedulerEntry
|
|
|
|
for _, key := range keys {
|
2021-09-02 20:56:02 +08:00
|
|
|
data, err := r.client.LRange(context.Background(), key, 0, -1).Result()
|
2020-09-27 08:33:29 +08:00
|
|
|
if err != nil {
|
|
|
|
continue // skip bad data
|
|
|
|
}
|
|
|
|
for _, s := range data {
|
2021-03-13 08:23:08 +08:00
|
|
|
e, err := base.DecodeSchedulerEntry([]byte(s))
|
|
|
|
if err != nil {
|
2020-09-27 08:33:29 +08:00
|
|
|
continue // skip bad data
|
|
|
|
}
|
2021-03-13 08:23:08 +08:00
|
|
|
entries = append(entries, e)
|
2020-09-27 08:33:29 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return entries, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ListSchedulerEnqueueEvents returns the list of scheduler enqueue events.
|
2020-12-26 23:03:04 +08:00
|
|
|
func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*base.SchedulerEnqueueEvent, error) {
|
2020-09-27 08:33:29 +08:00
|
|
|
key := base.SchedulerHistoryKey(entryID)
|
2021-09-02 20:56:02 +08:00
|
|
|
zs, err := r.client.ZRevRangeWithScores(context.Background(), key, pgn.start(), pgn.stop()).Result()
|
2020-09-27 08:33:29 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
var events []*base.SchedulerEnqueueEvent
|
|
|
|
for _, z := range zs {
|
|
|
|
data, err := cast.ToStringE(z.Member)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-03-13 08:23:08 +08:00
|
|
|
e, err := base.DecodeSchedulerEnqueueEvent([]byte(data))
|
|
|
|
if err != nil {
|
2020-09-27 08:33:29 +08:00
|
|
|
return nil, err
|
|
|
|
}
|
2021-03-13 08:23:08 +08:00
|
|
|
events = append(events, e)
|
2020-09-27 08:33:29 +08:00
|
|
|
}
|
|
|
|
return events, nil
|
|
|
|
}
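// Usage sketch for ListSchedulerEnqueueEvents (illustrative only). It assumes
// the Pagination helper defined earlier in this file exposes Size and Page
// fields; both field names are assumptions here.
func exampleRecentEnqueueEvents(r *RDB, entryID string) ([]*base.SchedulerEnqueueEvent, error) {
	// First page of the ten most recent enqueue events for the entry.
	return r.ListSchedulerEnqueueEvents(entryID, Pagination{Size: 10, Page: 0})
}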
|
|
|
|
|
2020-06-03 21:44:12 +08:00
|
|
|
// Pause pauses processing of tasks from the given queue.
|
|
|
|
func (r *RDB) Pause(qname string) error {
|
2020-08-13 21:54:32 +08:00
|
|
|
key := base.PausedKey(qname)
|
2021-09-02 20:56:02 +08:00
|
|
|
ok, err := r.client.SetNX(context.Background(), key, time.Now().Unix(), 0).Result()
|
2020-08-12 12:36:49 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-08-13 21:54:32 +08:00
|
|
|
if !ok {
|
2020-08-12 12:36:49 +08:00
|
|
|
return fmt.Errorf("queue %q is already paused", qname)
|
|
|
|
}
|
|
|
|
return nil
|
2020-06-03 21:44:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Unpause resumes processing of tasks from the given queue.
|
|
|
|
func (r *RDB) Unpause(qname string) error {
|
2020-08-13 21:54:32 +08:00
|
|
|
key := base.PausedKey(qname)
|
2021-09-02 20:56:02 +08:00
|
|
|
deleted, err := r.client.Del(context.Background(), key).Result()
|
2020-08-12 12:36:49 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if deleted == 0 {
|
|
|
|
return fmt.Errorf("queue %q is not paused", qname)
|
|
|
|
}
|
|
|
|
return nil
|
2020-06-03 21:44:12 +08:00
|
|
|
}
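// Pause/Unpause sketch (illustrative only): pause a queue for the duration of
// a maintenance window and attempt to resume it afterwards.
func exampleMaintenanceWindow(r *RDB, qname string, work func() error) error {
	if err := r.Pause(qname); err != nil {
		return err
	}
	defer r.Unpause(qname) // best effort; the error is ignored in this sketch
	return work()
}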
|
2020-09-01 21:57:08 +08:00
|
|
|
|
|
|
|
// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
|
|
|
|
func (r *RDB) ClusterKeySlot(qname string) (int64, error) {
|
2021-03-13 08:23:08 +08:00
|
|
|
key := base.PendingKey(qname)
|
2021-09-02 20:56:02 +08:00
|
|
|
return r.client.ClusterKeySlot(context.Background(), key).Result()
|
2020-09-01 21:57:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// ClusterNodes returns a list of nodes the given queue belongs to.
|
|
|
|
func (r *RDB) ClusterNodes(qname string) ([]redis.ClusterNode, error) {
|
|
|
|
keyslot, err := r.ClusterKeySlot(qname)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-09-02 20:56:02 +08:00
|
|
|
clusterSlots, err := r.client.ClusterSlots(context.Background()).Result()
|
2020-09-01 21:57:08 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
for _, slotRange := range clusterSlots {
|
|
|
|
if int64(slotRange.Start) <= keyslot && keyslot <= int64(slotRange.End) {
|
|
|
|
return slotRange.Nodes, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil, fmt.Errorf("nodes not found")
|
|
|
|
}
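// Cluster inspection sketch (illustrative only): report which hash slot and
// which nodes serve a queue; node details are printed with %+v to avoid
// assuming the go-redis ClusterNode layout here.
func exampleDescribeQueuePlacement(r *RDB, qname string) error {
	slot, err := r.ClusterKeySlot(qname)
	if err != nil {
		return err
	}
	nodes, err := r.ClusterNodes(qname)
	if err != nil {
		return err
	}
	fmt.Printf("queue %q -> slot %d, nodes %+v\n", qname, slot, nodes)
	return nil
}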
|