Add IsFailure to Config
With the IsFailure config option, users can provide a predicate function that determines whether the error returned from a Handler counts as a failure.
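
For illustration only (this block is not part of the diff below), a server using the option might look roughly like the sketch here. It assumes the predicate is exposed as Config.IsFailure with the shape func(error) bool, as the description above suggests; ErrRateLimited and the "send_email" task type are made-up placeholders.

package main

import (
	"context"
	"errors"
	"log"

	"github.com/hibiken/asynq"
)

// ErrRateLimited is a hypothetical error used only for this sketch.
var ErrRateLimited = errors.New("rate limited by downstream API")

func handleSendEmail(ctx context.Context, t *asynq.Task) error {
	// Returning ErrRateLimited still causes the task to be retried, but with
	// the predicate below it is not counted as a failure, so the error-rate
	// stats and the retry counter are left untouched.
	return ErrRateLimited
}

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		asynq.Config{
			// Count every handler error as a failure except rate limiting.
			IsFailure: func(err error) bool {
				return !errors.Is(err, ErrRateLimited)
			},
		},
	)
	mux := asynq.NewServeMux()
	mux.HandleFunc("send_email", handleSendEmail)
	if err := srv.Run(mux); err != nil {
		log.Fatal(err)
	}
}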
@@ -645,7 +645,7 @@ type Broker interface {
 	Requeue(msg *TaskMessage) error
 	Schedule(msg *TaskMessage, processAt time.Time) error
 	ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
-	Retry(msg *TaskMessage, processAt time.Time, errMsg string) error
+	Retry(msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
 	Archive(msg *TaskMessage, errMsg string) error
 	ForwardIfReady(qnames ...string) error
 	ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)

@@ -184,7 +184,7 @@ func BenchmarkRetry(b *testing.B) {
 		asynqtest.SeedDeadlines(b, r.client, zs, base.DefaultQueueName)
 		b.StartTimer()
 
-		if err := r.Retry(msgs[0], time.Now().Add(1*time.Minute), "error"); err != nil {
+		if err := r.Retry(msgs[0], time.Now().Add(1*time.Minute), "error", true /*isFailure*/); err != nil {
			b.Fatalf("Retry failed: %v", err)
 		}
 	}

@@ -467,6 +467,7 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
 // ARGV[2] -> updated base.TaskMessage value
 // ARGV[3] -> retry_at UNIX timestamp
 // ARGV[4] -> stats expiration timestamp
+// ARGV[5] -> is_failure (bool)
 var retryCmd = redis.NewScript(`
 if redis.call("LREM", KEYS[2], 0, ARGV[1]) == 0 then
 	return redis.error_reply("NOT FOUND")
@@ -476,23 +477,28 @@ if redis.call("ZREM", KEYS[3], ARGV[1]) == 0 then
 end
 redis.call("ZADD", KEYS[4], ARGV[3], ARGV[1])
 redis.call("HSET", KEYS[1], "msg", ARGV[2], "state", "retry")
-local n = redis.call("INCR", KEYS[5])
-if tonumber(n) == 1 then
-	redis.call("EXPIREAT", KEYS[5], ARGV[4])
-end
-local m = redis.call("INCR", KEYS[6])
-if tonumber(m) == 1 then
-	redis.call("EXPIREAT", KEYS[6], ARGV[4])
+if tonumber(ARGV[5]) == 1 then
+	local n = redis.call("INCR", KEYS[5])
+	if tonumber(n) == 1 then
+		redis.call("EXPIREAT", KEYS[5], ARGV[4])
+	end
+	local m = redis.call("INCR", KEYS[6])
+	if tonumber(m) == 1 then
+		redis.call("EXPIREAT", KEYS[6], ARGV[4])
+	end
 end
 return redis.status_reply("OK")`)
 
-// Retry moves the task from active to retry queue, incrementing retry count
-// and assigning error message to the task message.
-func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
+// Retry moves the task from active to retry queue.
+// It also annotates the message with the given error message and
+// if isFailure is true increments the retried counter.
+func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
 	var op errors.Op = "rdb.Retry"
 	now := time.Now()
 	modified := *msg
-	modified.Retried++
+	if isFailure {
+		modified.Retried++
+	}
 	modified.ErrorMsg = errMsg
 	modified.LastFailedAt = now.Unix()
 	encoded, err := base.EncodeMessage(&modified)
@@ -513,6 +519,7 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) e
 		encoded,
 		processAt.Unix(),
 		expireAt.Unix(),
+		isFailure,
 	}
 	return r.runScript(op, retryCmd, keys, argv...)
 }

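To make the new isFailure argument concrete, here is a minimal, self-contained sketch (not part of this diff) of how a caller could derive it from a user-supplied predicate before invoking Retry. TaskMessage, retryer, fakeBroker, and errTransient are stand-ins invented for this example, not asynq internals.

package main

import (
	"errors"
	"fmt"
	"time"
)

// TaskMessage and retryer mirror only the slice of the Broker interface
// changed in this diff; they are illustrative, not the real asynq types.
type TaskMessage struct {
	Retried int
	Queue   string
}

type retryer interface {
	Retry(msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
}

type fakeBroker struct{}

func (fakeBroker) Retry(msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
	fmt.Printf("retry at %s, err=%q, isFailure=%v\n", processAt.Format(time.RFC3339), errMsg, isFailure)
	return nil
}

// errTransient marks errors that should be retried but not counted as failures.
var errTransient = errors.New("transient condition")

func main() {
	var b retryer = fakeBroker{}

	// The user-supplied predicate decides the value of the new argument.
	isFailure := func(err error) bool { return !errors.Is(err, errTransient) }

	handlerErr := errTransient
	// The task is re-scheduled either way; only the Retried counter and the
	// processed/failed stats depend on isFailure (see the Lua script above).
	_ = b.Retry(&TaskMessage{Queue: "default"}, time.Now().Add(time.Minute),
		handlerErr.Error(), isFailure(handlerErr))
}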
@@ -1158,7 +1158,7 @@ func TestRetry(t *testing.T) {
 		h.SeedAllRetryQueues(t, r.client, tc.retry)
 
 		callTime := time.Now() // time when method was called
-		err := r.Retry(tc.msg, tc.processAt, tc.errMsg)
+		err := r.Retry(tc.msg, tc.processAt, tc.errMsg, true /*isFailure*/)
 		if err != nil {
 			t.Errorf("(*RDB).Retry = %v, want nil", err)
 			continue
@@ -1210,6 +1210,173 @@ func TestRetry(t *testing.T) {
 	}
 }
 
+func TestRetryWithNonFailureError(t *testing.T) {
+	r := setup(t)
+	defer r.Close()
+	now := time.Now()
+	t1 := &base.TaskMessage{
+		ID:      uuid.New(),
+		Type:    "send_email",
+		Payload: h.JSON(map[string]interface{}{"subject": "Hola!"}),
+		Retried: 10,
+		Timeout: 1800,
+		Queue:   "default",
+	}
+	t2 := &base.TaskMessage{
+		ID:      uuid.New(),
+		Type:    "gen_thumbnail",
+		Payload: h.JSON(map[string]interface{}{"path": "some/path/to/image.jpg"}),
+		Timeout: 3000,
+		Queue:   "default",
+	}
+	t3 := &base.TaskMessage{
+		ID:      uuid.New(),
+		Type:    "reindex",
+		Payload: nil,
+		Timeout: 60,
+		Queue:   "default",
+	}
+	t4 := &base.TaskMessage{
+		ID:      uuid.New(),
+		Type:    "send_notification",
+		Payload: nil,
+		Timeout: 1800,
+		Queue:   "custom",
+	}
+	t1Deadline := now.Unix() + t1.Timeout
+	t2Deadline := now.Unix() + t2.Timeout
+	t4Deadline := now.Unix() + t4.Timeout
+	errMsg := "SMTP server is not responding"
+
+	tests := []struct {
+		active        map[string][]*base.TaskMessage
+		deadlines     map[string][]base.Z
+		retry         map[string][]base.Z
+		msg           *base.TaskMessage
+		processAt     time.Time
+		errMsg        string
+		wantActive    map[string][]*base.TaskMessage
+		wantDeadlines map[string][]base.Z
+		getWantRetry  func(failedAt time.Time) map[string][]base.Z
+	}{
+		{
+			active: map[string][]*base.TaskMessage{
+				"default": {t1, t2},
+			},
+			deadlines: map[string][]base.Z{
+				"default": {{Message: t1, Score: t1Deadline}, {Message: t2, Score: t2Deadline}},
+			},
+			retry: map[string][]base.Z{
+				"default": {{Message: t3, Score: now.Add(time.Minute).Unix()}},
+			},
+			msg:       t1,
+			processAt: now.Add(5 * time.Minute),
+			errMsg:    errMsg,
+			wantActive: map[string][]*base.TaskMessage{
+				"default": {t2},
+			},
+			wantDeadlines: map[string][]base.Z{
+				"default": {{Message: t2, Score: t2Deadline}},
+			},
+			getWantRetry: func(failedAt time.Time) map[string][]base.Z {
+				return map[string][]base.Z{
+					"default": {
+						// Task message should include the error message but without incrementing the retry count.
+						{Message: h.TaskMessageWithError(*t1, errMsg, failedAt), Score: now.Add(5 * time.Minute).Unix()},
+						{Message: t3, Score: now.Add(time.Minute).Unix()},
+					},
+				}
+			},
+		},
+		{
+			active: map[string][]*base.TaskMessage{
+				"default": {t1, t2},
+				"custom":  {t4},
+			},
+			deadlines: map[string][]base.Z{
+				"default": {{Message: t1, Score: t1Deadline}, {Message: t2, Score: t2Deadline}},
+				"custom":  {{Message: t4, Score: t4Deadline}},
+			},
+			retry: map[string][]base.Z{
+				"default": {},
+				"custom":  {},
+			},
+			msg:       t4,
+			processAt: now.Add(5 * time.Minute),
+			errMsg:    errMsg,
+			wantActive: map[string][]*base.TaskMessage{
+				"default": {t1, t2},
+				"custom":  {},
+			},
+			wantDeadlines: map[string][]base.Z{
+				"default": {{Message: t1, Score: t1Deadline}, {Message: t2, Score: t2Deadline}},
+				"custom":  {},
+			},
+			getWantRetry: func(failedAt time.Time) map[string][]base.Z {
+				return map[string][]base.Z{
+					"default": {},
+					"custom": {
+						// Task message should include the error message but without incrementing the retry count.
+						{Message: h.TaskMessageWithError(*t4, errMsg, failedAt), Score: now.Add(5 * time.Minute).Unix()},
+					},
+				}
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		h.FlushDB(t, r.client)
+		h.SeedAllActiveQueues(t, r.client, tc.active)
+		h.SeedAllDeadlines(t, r.client, tc.deadlines)
+		h.SeedAllRetryQueues(t, r.client, tc.retry)
+
+		callTime := time.Now() // time when method was called
+		err := r.Retry(tc.msg, tc.processAt, tc.errMsg, false /*isFailure*/)
+		if err != nil {
+			t.Errorf("(*RDB).Retry = %v, want nil", err)
+			continue
+		}
+
+		for queue, want := range tc.wantActive {
+			gotActive := h.GetActiveMessages(t, r.client, queue)
+			if diff := cmp.Diff(want, gotActive, h.SortMsgOpt); diff != "" {
+				t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.ActiveKey(queue), diff)
+			}
+		}
+		for queue, want := range tc.wantDeadlines {
+			gotDeadlines := h.GetDeadlinesEntries(t, r.client, queue)
+			if diff := cmp.Diff(want, gotDeadlines, h.SortZSetEntryOpt); diff != "" {
+				t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.DeadlinesKey(queue), diff)
+			}
+		}
+		cmpOpts := []cmp.Option{
+			h.SortZSetEntryOpt,
+			cmpopts.EquateApproxTime(5 * time.Second), // for LastFailedAt field
+		}
+		wantRetry := tc.getWantRetry(callTime)
+		for queue, want := range wantRetry {
+			gotRetry := h.GetRetryEntries(t, r.client, queue)
+			if diff := cmp.Diff(want, gotRetry, cmpOpts...); diff != "" {
+				t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.RetryKey(queue), diff)
+			}
+		}
+
+		// If isFailure is set to false, no stats should be recorded to avoid skewing the error rate.
+		processedKey := base.ProcessedKey(tc.msg.Queue, time.Now())
+		gotProcessed := r.client.Get(processedKey).Val()
+		if gotProcessed != "" {
+			t.Errorf("GET %q = %q, want empty", processedKey, gotProcessed)
+		}
+
+		// If isFailure is set to false, no stats should be recorded to avoid skewing the error rate.
+		failedKey := base.FailedKey(tc.msg.Queue, time.Now())
+		gotFailed := r.client.Get(failedKey).Val()
+		if gotFailed != "" {
+			t.Errorf("GET %q = %q, want empty", failedKey, gotFailed)
+		}
+	}
+}
+
 func TestArchive(t *testing.T) {
 	r := setup(t)
 	defer r.Close()

@@ -108,13 +108,13 @@ func (tb *TestBroker) ScheduleUnique(msg *base.TaskMessage, processAt time.Time,
 	return tb.real.ScheduleUnique(msg, processAt, ttl)
 }
 
-func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
+func (tb *TestBroker) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string, isFailure bool) error {
 	tb.mu.Lock()
 	defer tb.mu.Unlock()
 	if tb.sleeping {
 		return errRedisDown
 	}
-	return tb.real.Retry(msg, processAt, errMsg)
+	return tb.real.Retry(msg, processAt, errMsg, isFailure)
 }
 
 func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error {