Mirror of https://github.com/hibiken/asynq.git (synced 2025-08-19 15:08:55 +08:00)
Introduce Task Results
* Added Retention option to specify retention TTL for tasks
* Added ResultWriter as a client interface to write result data for the associated task
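A minimal sketch of how the two additions might be used together (assuming the option is exposed as asynq.Retention and the writer is reachable via the task's ResultWriter inside a handler; the task type, payload, and handler wiring below are illustrative only, not taken from this commit):

package main

import (
	"context"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Retention keeps the task (and any result written for it) around after it
	// completes, which is what makes the new "completed" state visible.
	task := asynq.NewTask("email:welcome", []byte(`{"user_id": 42}`))
	if _, err := client.Enqueue(task, asynq.Retention(24*time.Hour)); err != nil {
		log.Fatal(err)
	}
}

// On the worker side, a handler registered with a server/mux (omitted here)
// can record result data for the task through its ResultWriter.
func handleWelcomeEmail(ctx context.Context, t *asynq.Task) error {
	result := []byte(`{"delivered": true}`) // placeholder result payload
	if _, err := t.ResultWriter().Write(result); err != nil {
		return err
	}
	return nil
}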
@@ -63,7 +63,7 @@ func cronList(cmd *cobra.Command, args []string) {
	cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"}
	printRows := func(w io.Writer, tmpl string) {
		for _, e := range entries {
			fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), formatPayload(e.Task.Payload()), e.Opts,
			fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), sprintBytes(e.Task.Payload()), e.Opts,
				nextEnqueue(e.Next), prevEnqueue(e.Prev))
		}
	}
@@ -1,405 +0,0 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package cmd

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/google/uuid"
	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/errors"
	"github.com/hibiken/asynq/internal/rdb"
	"github.com/spf13/cobra"
)

// migrateCmd represents the migrate command.
var migrateCmd = &cobra.Command{
	Use:   "migrate",
	Short: fmt.Sprintf("Migrate existing tasks and queues to be asynq%s compatible", base.Version),
	Long: `Migrate (asynq migrate) will migrate existing tasks and queues in redis to be compatible with the latest version of asynq.
`,
	Args: cobra.NoArgs,
	Run:  migrate,
}

func init() {
	rootCmd.AddCommand(migrateCmd)
}

func backupKey(key string) string {
	return fmt.Sprintf("%s:backup", key)
}

func renameKeyAsBackup(c redis.UniversalClient, key string) error {
	if c.Exists(context.Background(), key).Val() == 0 {
		return nil // key doesn't exist; no-op
	}
	return c.Rename(context.Background(), key, backupKey(key)).Err()
}

func failIfError(err error, msg string) {
	if err != nil {
		fmt.Printf("error: %s: %v\n", msg, err)
		fmt.Println("*** Please report this issue at https://github.com/hibiken/asynq/issues ***")
		os.Exit(1)
	}
}

func logIfError(err error, msg string) {
	if err != nil {
		fmt.Printf("warning: %s: %v\n", msg, err)
	}
}

func migrate(cmd *cobra.Command, args []string) {
	r := createRDB()
	queues, err := r.AllQueues()
	failIfError(err, "Failed to get queue names")

	// ---------------------------------------------
	// Pre-check: Ensure no active servers, tasks.
	// ---------------------------------------------
	srvs, err := r.ListServers()
	failIfError(err, "Failed to get server infos")
	if len(srvs) > 0 {
		fmt.Println("(error): Server(s) still running. Please ensure that no asynq servers are running when runnning migrate command.")
		os.Exit(1)
	}
	for _, qname := range queues {
		stats, err := r.CurrentStats(qname)
		failIfError(err, "Failed to get stats")
		if stats.Active > 0 {
			fmt.Printf("(error): %d active tasks found. Please ensure that no active tasks exist when running migrate command.\n", stats.Active)
			os.Exit(1)
		}
	}

	// ---------------------------------------------
	// Rename pending key
	// ---------------------------------------------
	fmt.Print("Renaming pending keys...")
	for _, qname := range queues {
		oldKey := fmt.Sprintf("asynq:{%s}", qname)
		if r.Client().Exists(context.Background(), oldKey).Val() == 0 {
			continue
		}
		newKey := base.PendingKey(qname)
		err := r.Client().Rename(context.Background(), oldKey, newKey).Err()
		failIfError(err, "Failed to rename key")
	}
	fmt.Print("Done\n")

	// ---------------------------------------------
	// Rename keys as backup
	// ---------------------------------------------
	fmt.Print("Renaming keys for backup...")
	for _, qname := range queues {
		keys := []string{
			base.ActiveKey(qname),
			base.PendingKey(qname),
			base.ScheduledKey(qname),
			base.RetryKey(qname),
			base.ArchivedKey(qname),
		}
		for _, key := range keys {
			err := renameKeyAsBackup(r.Client(), key)
			failIfError(err, fmt.Sprintf("Failed to rename key %q for backup", key))
		}
	}
	fmt.Print("Done\n")

	// ---------------------------------------------
	// Update to new schema
	// ---------------------------------------------
	fmt.Print("Updating to new schema...")
	for _, qname := range queues {
		updatePendingMessages(r, qname)
		updateZSetMessages(r.Client(), base.ScheduledKey(qname), "scheduled")
		updateZSetMessages(r.Client(), base.RetryKey(qname), "retry")
		updateZSetMessages(r.Client(), base.ArchivedKey(qname), "archived")
	}
	fmt.Print("Done\n")

	// ---------------------------------------------
	// Delete backup keys
	// ---------------------------------------------
	fmt.Print("Deleting backup keys...")
	for _, qname := range queues {
		keys := []string{
			backupKey(base.ActiveKey(qname)),
			backupKey(base.PendingKey(qname)),
			backupKey(base.ScheduledKey(qname)),
			backupKey(base.RetryKey(qname)),
			backupKey(base.ArchivedKey(qname)),
		}
		for _, key := range keys {
			err := r.Client().Del(context.Background(), key).Err()
			failIfError(err, "Failed to delete backup key")
		}
	}
	fmt.Print("Done\n")
}

func UnmarshalOldMessage(encoded string) (*base.TaskMessage, error) {
	oldMsg, err := DecodeMessage(encoded)
	if err != nil {
		return nil, err
	}
	payload, err := json.Marshal(oldMsg.Payload)
	if err != nil {
		return nil, fmt.Errorf("could not marshal payload: %v", err)
	}
	return &base.TaskMessage{
		Type:         oldMsg.Type,
		Payload:      payload,
		ID:           oldMsg.ID,
		Queue:        oldMsg.Queue,
		Retry:        oldMsg.Retry,
		Retried:      oldMsg.Retried,
		ErrorMsg:     oldMsg.ErrorMsg,
		LastFailedAt: 0,
		Timeout:      oldMsg.Timeout,
		Deadline:     oldMsg.Deadline,
		UniqueKey:    oldMsg.UniqueKey,
	}, nil
}

// TaskMessage from v0.17
type OldTaskMessage struct {
	// Type indicates the kind of the task to be performed.
	Type string

	// Payload holds data needed to process the task.
	Payload map[string]interface{}

	// ID is a unique identifier for each task.
	ID uuid.UUID

	// Queue is a name this message should be enqueued to.
	Queue string

	// Retry is the max number of retry for this task.
	Retry int

	// Retried is the number of times we've retried this task so far.
	Retried int

	// ErrorMsg holds the error message from the last failure.
	ErrorMsg string

	// Timeout specifies timeout in seconds.
	// If task processing doesn't complete within the timeout, the task will be retried
	// if retry count is remaining. Otherwise it will be moved to the archive.
	//
	// Use zero to indicate no timeout.
	Timeout int64

	// Deadline specifies the deadline for the task in Unix time,
	// the number of seconds elapsed since January 1, 1970 UTC.
	// If task processing doesn't complete before the deadline, the task will be retried
	// if retry count is remaining. Otherwise it will be moved to the archive.
	//
	// Use zero to indicate no deadline.
	Deadline int64

	// UniqueKey holds the redis key used for uniqueness lock for this task.
	//
	// Empty string indicates that no uniqueness lock was used.
	UniqueKey string
}

// DecodeMessage unmarshals the given encoded string and returns a decoded task message.
// Code from v0.17.
func DecodeMessage(s string) (*OldTaskMessage, error) {
	d := json.NewDecoder(strings.NewReader(s))
	d.UseNumber()
	var msg OldTaskMessage
	if err := d.Decode(&msg); err != nil {
		return nil, err
	}
	return &msg, nil
}

func updatePendingMessages(r *rdb.RDB, qname string) {
	data, err := r.Client().LRange(context.Background(), backupKey(base.PendingKey(qname)), 0, -1).Result()
	failIfError(err, "Failed to read backup pending key")

	for _, s := range data {
		msg, err := UnmarshalOldMessage(s)
		failIfError(err, "Failed to unmarshal message")

		if msg.UniqueKey != "" {
			ttl, err := r.Client().TTL(context.Background(), msg.UniqueKey).Result()
			failIfError(err, "Failed to get ttl")

			if ttl > 0 {
				err = r.Client().Del(context.Background(), msg.UniqueKey).Err()
				logIfError(err, "Failed to delete unique key")
			}

			// Regenerate unique key.
			msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
			if ttl > 0 {
				err = r.EnqueueUnique(msg, ttl)
			} else {
				err = r.Enqueue(msg)
			}
			failIfError(err, "Failed to enqueue message")

		} else {
			err := r.Enqueue(msg)
			failIfError(err, "Failed to enqueue message")
		}
	}
}

// KEYS[1] -> asynq:{<qname>}:t:<task_id>
// KEYS[2] -> asynq:{<qname>}:scheduled
// ARGV[1] -> task message data
// ARGV[2] -> zset score
// ARGV[3] -> task ID
// ARGV[4] -> task timeout in seconds (0 if not timeout)
// ARGV[5] -> task deadline in unix time (0 if no deadline)
// ARGV[6] -> task state (e.g. "retry", "archived")
var taskZAddCmd = redis.NewScript(`
redis.call("HSET", KEYS[1],
           "msg", ARGV[1],
           "state", ARGV[6],
           "timeout", ARGV[4],
           "deadline", ARGV[5])
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[3])
return 1
`)

// ZAddTask adds task to zset.
func ZAddTask(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string) error {
	// Special case; LastFailedAt field is new so assign a value inferred from zscore.
	if state == "archived" {
		msg.LastFailedAt = int64(score)
	}

	encoded, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	if err := c.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
		return err
	}
	keys := []string{
		base.TaskKey(msg.Queue, msg.ID.String()),
		key,
	}
	argv := []interface{}{
		encoded,
		score,
		msg.ID.String(),
		msg.Timeout,
		msg.Deadline,
		state,
	}
	return taskZAddCmd.Run(context.Background(), c, keys, argv...).Err()
}

// KEYS[1] -> unique key
// KEYS[2] -> asynq:{<qname>}:t:<task_id>
// KEYS[3] -> zset key (e.g. asynq:{<qname>}:scheduled)
// --
// ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> score (process_at timestamp)
// ARGV[4] -> task message
// ARGV[5] -> task timeout in seconds (0 if not timeout)
// ARGV[6] -> task deadline in unix time (0 if no deadline)
// ARGV[7] -> task state (oneof "scheduled", "retry", "archived")
var taskZAddUniqueCmd = redis.NewScript(`
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
if not ok then
  return 0
end
redis.call("HSET", KEYS[2],
           "msg", ARGV[4],
           "state", ARGV[7],
           "timeout", ARGV[5],
           "deadline", ARGV[6],
           "unique_key", KEYS[1])
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1])
return 1
`)

// ScheduleUnique adds the task to the backlog queue to be processed in the future if the uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired.
func ZAddTaskUnique(c redis.UniversalClient, key string, msg *base.TaskMessage, score float64, state string, ttl time.Duration) error {
	encoded, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	if err := c.SAdd(context.Background(), base.AllQueues, msg.Queue).Err(); err != nil {
		return err
	}
	keys := []string{
		msg.UniqueKey,
		base.TaskKey(msg.Queue, msg.ID.String()),
		key,
	}
	argv := []interface{}{
		msg.ID.String(),
		int(ttl.Seconds()),
		score,
		encoded,
		msg.Timeout,
		msg.Deadline,
		state,
	}
	res, err := taskZAddUniqueCmd.Run(context.Background(), c, keys, argv...).Result()
	if err != nil {
		return err
	}
	n, ok := res.(int64)
	if !ok {
		return errors.E(errors.Internal, fmt.Sprintf("cast error: unexpected return value from Lua script: %v", res))
	}
	if n == 0 {
		return errors.E(errors.AlreadyExists, errors.ErrDuplicateTask)
	}
	return nil
}

func updateZSetMessages(c redis.UniversalClient, key, state string) {
	zs, err := c.ZRangeWithScores(context.Background(), backupKey(key), 0, -1).Result()
	failIfError(err, "Failed to read")

	for _, z := range zs {
		msg, err := UnmarshalOldMessage(z.Member.(string))
		failIfError(err, "Failed to unmarshal message")

		if msg.UniqueKey != "" {
			ttl, err := c.TTL(context.Background(), msg.UniqueKey).Result()
			failIfError(err, "Failed to get ttl")

			if ttl > 0 {
				err = c.Del(context.Background(), msg.UniqueKey).Err()
				logIfError(err, "Failed to delete unique key")
			}

			// Regenerate unique key.
			msg.UniqueKey = base.UniqueKey(msg.Queue, msg.Type, msg.Payload)
			if ttl > 0 {
				err = ZAddTaskUnique(c, key, msg, z.Score, state, ttl)
			} else {
				err = ZAddTask(c, key, msg, z.Score, state)
			}
			failIfError(err, "Failed to zadd message")
		} else {
			err := ZAddTask(c, key, msg, z.Score, state)
			failIfError(err, "Failed to enqueue scheduled message")
		}
	}
}
@@ -148,9 +148,9 @@ func printQueueInfo(info *asynq.QueueInfo) {
	fmt.Printf("Paused: %t\n\n", info.Paused)
	bold.Println("Task Count by State")
	printTable(
		[]string{"active", "pending", "scheduled", "retry", "archived"},
		[]string{"active", "pending", "scheduled", "retry", "archived", "completed"},
		func(w io.Writer, tmpl string) {
			fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived)
			fmt.Fprintf(w, tmpl, info.Active, info.Pending, info.Scheduled, info.Retry, info.Archived, info.Completed)
		},
	)
	fmt.Println()

@@ -199,9 +199,9 @@ func printTable(cols []string, printRows func(w io.Writer, tmpl string)) {
	tw.Flush()
}

// formatPayload returns string representation of payload if data is printable.
// If data is not printable, it returns a string describing payload is not printable.
func formatPayload(payload []byte) string {
// sprintBytes returns a string representation of the given byte slice if data is printable.
// If data is not printable, it returns a string describing it is not printable.
func sprintBytes(payload []byte) string {
	if !isPrintable(payload) {
		return "non-printable bytes"
	}
@@ -7,11 +7,13 @@ package cmd
import (
	"fmt"
	"io"
	"math"
	"os"
	"strconv"
	"strings"
	"text/tabwriter"
	"time"
	"unicode/utf8"

	"github.com/fatih/color"
	"github.com/hibiken/asynq/internal/rdb"

@@ -58,6 +60,7 @@ type AggregateStats struct {
	Scheduled int
	Retry     int
	Archived  int
	Completed int
	Processed int
	Failed    int
	Timestamp time.Time

@@ -85,6 +88,7 @@ func stats(cmd *cobra.Command, args []string) {
		aggStats.Scheduled += s.Scheduled
		aggStats.Retry += s.Retry
		aggStats.Archived += s.Archived
		aggStats.Completed += s.Completed
		aggStats.Processed += s.Processed
		aggStats.Failed += s.Failed
		aggStats.Timestamp = s.Timestamp

@@ -124,22 +128,50 @@ func stats(cmd *cobra.Command, args []string) {
}

func printStatsByState(s *AggregateStats) {
	format := strings.Repeat("%v\t", 5) + "\n"
	format := strings.Repeat("%v\t", 6) + "\n"
	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived")
	fmt.Fprintf(tw, format, "----------", "--------", "---------", "-----", "----")
	fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived)
	fmt.Fprintf(tw, format, "active", "pending", "scheduled", "retry", "archived", "completed")
	width := maxInt(9 /* defaultWidth */, maxWidthOf(s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)) // length of widest column
	sep := strings.Repeat("-", width)
	fmt.Fprintf(tw, format, sep, sep, sep, sep, sep, sep)
	fmt.Fprintf(tw, format, s.Active, s.Pending, s.Scheduled, s.Retry, s.Archived, s.Completed)
	tw.Flush()
}

// numDigits returns the number of digits in n.
func numDigits(n int) int {
	return len(strconv.Itoa(n))
}

// maxWidthOf returns the max number of digits amount the provided vals.
func maxWidthOf(vals ...int) int {
	max := 0
	for _, v := range vals {
		if vw := numDigits(v); vw > max {
			max = vw
		}
	}
	return max
}

func maxInt(a, b int) int {
	return int(math.Max(float64(a), float64(b)))
}

func printStatsByQueue(stats []*rdb.Stats) {
	var headers, seps, counts []string
	maxHeaderWidth := 0
	for _, s := range stats {
		title := queueTitle(s)
		headers = append(headers, title)
		seps = append(seps, strings.Repeat("-", len(title)))
		if w := utf8.RuneCountInString(title); w > maxHeaderWidth {
			maxHeaderWidth = w
		}
		counts = append(counts, strconv.Itoa(s.Size))
	}
	for i := 0; i < len(headers); i++ {
		seps = append(seps, strings.Repeat("-", maxHeaderWidth))
	}
	format := strings.Repeat("%v\t", len(headers)) + "\n"
	tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprintf(tw, format, toInterfaceSlice(headers)...)
@@ -86,6 +86,7 @@ The value for the state flag should be one of:
- scheduled
- retry
- archived
- completed

List opeartion paginates the result set.
By default, the command fetches the first 30 tasks.

@@ -189,6 +190,8 @@ func taskList(cmd *cobra.Command, args []string) {
		listRetryTasks(qname, pageNum, pageSize)
	case "archived":
		listArchivedTasks(qname, pageNum, pageSize)
	case "completed":
		listCompletedTasks(qname, pageNum, pageSize)
	default:
		fmt.Printf("error: state=%q is not supported\n", state)
		os.Exit(1)

@@ -210,7 +213,7 @@ func listActiveTasks(qname string, pageNum, pageSize int) {
		[]string{"ID", "Type", "Payload"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload))
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
			}
		},
	)

@@ -231,7 +234,7 @@ func listPendingTasks(qname string, pageNum, pageSize int) {
		[]string{"ID", "Type", "Payload"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload))
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload))
			}
		},
	)

@@ -252,7 +255,7 @@ func listScheduledTasks(qname string, pageNum, pageSize int) {
		[]string{"ID", "Type", "Payload", "Process In"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt))
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt))
			}
		},
	)

@@ -284,8 +287,8 @@ func listRetryTasks(qname string, pageNum, pageSize int) {
		[]string{"ID", "Type", "Payload", "Next Retry", "Last Error", "Last Failed", "Retried", "Max Retry"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatProcessAt(t.NextProcessAt),
					t.LastErr, formatLastFailedAt(t.LastFailedAt), t.Retried, t.MaxRetry)
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatProcessAt(t.NextProcessAt),
					t.LastErr, formatPastTime(t.LastFailedAt), t.Retried, t.MaxRetry)
			}
		},
	)

@@ -306,7 +309,27 @@ func listArchivedTasks(qname string, pageNum, pageSize int) {
		[]string{"ID", "Type", "Payload", "Last Failed", "Last Error"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, formatPayload(t.Payload), formatLastFailedAt(t.LastFailedAt), t.LastErr)
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.LastFailedAt), t.LastErr)
			}
		})
}

func listCompletedTasks(qname string, pageNum, pageSize int) {
	i := createInspector()
	tasks, err := i.ListCompletedTasks(qname, asynq.PageSize(pageSize), asynq.Page(pageNum))
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if len(tasks) == 0 {
		fmt.Printf("No completed tasks in %q queue\n", qname)
		return
	}
	printTable(
		[]string{"ID", "Type", "Payload", "CompletedAt", "Result"},
		func(w io.Writer, tmpl string) {
			for _, t := range tasks {
				fmt.Fprintf(w, tmpl, t.ID, t.Type, sprintBytes(t.Payload), formatPastTime(t.CompletedAt), sprintBytes(t.Result))
			}
		})
}

@@ -356,7 +379,7 @@ func printTaskInfo(info *asynq.TaskInfo) {
	if len(info.LastErr) != 0 {
		fmt.Println()
		bold.Println("Last Failure")
		fmt.Printf("Failed at: %s\n", formatLastFailedAt(info.LastFailedAt))
		fmt.Printf("Failed at: %s\n", formatPastTime(info.LastFailedAt))
		fmt.Printf("Error message: %s\n", info.LastErr)
	}
}

@@ -371,11 +394,12 @@ func formatNextProcessAt(processAt time.Time) string {
	return fmt.Sprintf("%s (in %v)", processAt.Format(time.UnixDate), processAt.Sub(time.Now()).Round(time.Second))
}

func formatLastFailedAt(lastFailedAt time.Time) string {
	if lastFailedAt.IsZero() || lastFailedAt.Unix() == 0 {
// formatPastTime takes t which is time in the past and returns a user-friendly string.
func formatPastTime(t time.Time) string {
	if t.IsZero() || t.Unix() == 0 {
		return ""
	}
	return lastFailedAt.Format(time.UnixDate)
	return t.Format(time.UnixDate)
}

func taskArchive(cmd *cobra.Command, args []string) {

@@ -496,6 +520,8 @@ func taskDeleteAll(cmd *cobra.Command, args []string) {
		n, err = i.DeleteAllRetryTasks(qname)
	case "archived":
		n, err = i.DeleteAllArchivedTasks(qname)
	case "completed":
		n, err = i.DeleteAllCompletedTasks(qname)
	default:
		fmt.Printf("error: unsupported state %q\n", state)
		os.Exit(1)
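With the completed state wired into task ls and task deleteall above, retained results become visible from the CLI. Assuming the existing --queue and --state flags, a command along the lines of "asynq task ls --queue=default --state=completed" would list tasks kept by the Retention option together with the result data written through ResultWriter.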