Mirror of https://github.com/hibiken/asynq.git (synced 2025-09-17 12:20:07 +08:00)
Compare commits
14 Commits
- c197902dc0
- e6355bf3f5
- 95c90a5cb8
- 6817af366a
- 4bce28d677
- 73f930313c
- bff2a05d59
- 684a7e0c98
- 46b23d6495
- c0ae62499f
- 7744ade362
- f532c95394
- ff6768f9bb
- d5e9f3b1bd
CHANGELOG.md (29 changed lines)
@@ -7,7 +7,34 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
 
+## [0.18.3] - 2021-08-09
+
+### Changed
+
+- `Client.Enqueue` no longer enqueues tasks with an empty typename; an error is returned instead.
+
+## [0.18.2] - 2021-07-15
+
+### Changed
+
+- Changed the `Queue` function to no longer convert the provided queue name to lowercase. Queue names are now case-sensitive.
+- `QueueInfo.MemoryUsage` is now an approximate usage value.
+
+### Fixed
+
+- Fixed latency issue around memory usage (see https://github.com/hibiken/asynq/issues/309).
+
+## [0.18.1] - 2021-07-04
+
+### Changed
+
+- Changed to execute task recovering logic when the server starts up; previously it waited up to a minute before the task recovering logic executed.
+
+### Fixed
+
+- Fixed task recovering logic to execute every minute.
+
 ## [0.18.0] - 2021-06-29
 
 ### Changed
 
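Taken together, the `0.18.2` and `0.18.3` entries change what `Client.Enqueue` accepts. A minimal caller-side sketch, assuming a Redis server on localhost:6379; the typename `email:deliver` and queue name `Critical` are illustrative, not taken from the diff:

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// 0.18.3: an empty typename is rejected at enqueue time instead of
	// being written to Redis.
	if _, err := client.Enqueue(asynq.NewTask("", nil)); err != nil {
		log.Printf("enqueue rejected: %v", err)
	}

	// 0.18.2: queue names are no longer lowercased, so "Critical" and
	// "critical" now name two different queues.
	info, err := client.Enqueue(asynq.NewTask("email:deliver", nil), asynq.Queue("Critical"))
	if err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
}
```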
README.md (28 changed lines)
@@ -26,11 +26,10 @@ Task queues are used as a mechanism to distribute work across multiple machines
 - Guaranteed [at least one execution](https://www.cloudcomputingpatterns.org/at_least_once_delivery/) of a task
 - Scheduling of tasks
-- Durability since tasks are written to Redis
 - [Retries](https://github.com/hibiken/asynq/wiki/Task-Retry) of failed tasks
 - Automatic recovery of tasks in the event of a worker crash
-- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#weighted-priority-queues)
+- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#weighted-priority)
-- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#strict-priority-queues)
+- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#strict-priority)
 - Low latency to add a task since writes are fast in Redis
 - De-duplication of tasks using [unique option](https://github.com/hibiken/asynq/wiki/Unique-Tasks)
 - Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)
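The weighted and strict priority queues listed above are configured on the worker server. A minimal sketch, assuming Redis on localhost:6379; the queue names and weights are illustrative:

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		asynq.Config{
			// With these weights, "critical" is processed roughly six times
			// as often as "low" when all queues have pending tasks.
			Queues: map[string]int{
				"critical": 6,
				"default":  3,
				"low":      1,
			},
			// StrictPriority: true, // always drain higher-priority queues first
		},
	)

	mux := asynq.NewServeMux()
	// Register handlers on mux before running the server.
	if err := srv.Run(mux); err != nil {
		log.Fatalf("could not run server: %v", err)
	}
}
```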
@@ -58,7 +57,7 @@ Initialize your project by creating a folder and then running `go mod init githu
 go get -u github.com/hibiken/asynq
 ```
 
-Make sure you're running a Redis server locally or from a [Docker](https://hub.docker.com/_/redis) container. Version `3.0` or higher is required.
+Make sure you're running a Redis server locally or from a [Docker](https://hub.docker.com/_/redis) container. Version `4.0` or higher is required.
 
 Next, write a package that encapsulates task creation and task handling.
 
@@ -92,7 +91,7 @@ type ImageResizePayload struct {
 //----------------------------------------------
 
 func NewEmailDeliveryTask(userID int, tmplID string) (*asynq.Task, error) {
-	payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: templID})
+	payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: tmplID})
 	if err != nil {
 		return nil, err
 	}
@@ -120,7 +119,7 @@ func HandleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error {
 	if err := json.Unmarshal(t.Payload(), &p); err != nil {
 		return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
 	}
-	log.Printf("Sending Email to User: user_id = %d, template_id = %s\n", p.UserID, p.TemplateID)
+	log.Printf("Sending Email to User: user_id=%d, template_id=%s", p.UserID, p.TemplateID)
 	// Email delivery code ...
 	return nil
 }
@@ -130,28 +129,27 @@ type ImageProcessor struct {
 	// ... fields for struct
 }
 
-func (p *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
+func (processor *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
 	var p ImageResizePayload
 	if err := json.Unmarshal(t.Payload(), &p); err != nil {
 		return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
 	}
-	log.Printf("Resizing image: src = %s\n", p.SourceURL)
+	log.Printf("Resizing image: src=%s", p.SourceURL)
 	// Image resizing code ...
 	return nil
 }
 
 func NewImageProcessor() *ImageProcessor {
-	// ... return an instance
+	return &ImageProcessor{}
 }
 ```
 
-In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on the queue.
+In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on queues.
 
 ```go
 package main
 
 import (
-	"fmt"
 	"log"
 	"time"
 
@@ -178,7 +176,7 @@ func main() {
 	if err != nil {
 		log.Fatalf("could not enqueue task: %v", err)
 	}
-	fmt.Printf("enqueued task: id=%s queue=%s\n", info.ID, info.Queue)
+	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 
 
 	// ------------------------------------------------------------
@@ -190,7 +188,7 @@ func main() {
 	if err != nil {
 		log.Fatalf("could not schedule task: %v", err)
 	}
-	fmt.Printf("enqueued task: id=%s queue=%s\n", info.ID, info.Queue)
+	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 
 
 	// ----------------------------------------------------------------------------
@@ -208,7 +206,7 @@ func main() {
 	if err != nil {
 		log.Fatalf("could not enqueue task: %v", err)
 	}
-	fmt.Printf("enqueued task: id=%s queue=%s\n", info.ID, info.Queue)
+	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 
 	// ---------------------------------------------------------------------------
 	// Example 4: Pass options to tune task processing behavior at enqueue time.
@@ -219,7 +217,7 @@ func main() {
 	if err != nil {
 		log.Fatal("could not enqueue task: %v", err)
 	}
-	fmt.Printf("enqueued task: id=%s queue=%s\n", info.ID, info.Queue)
+	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
 }
 ```
 
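Example 4 in the hunk above passes options at enqueue time. A hedged sketch of such a call, assuming Redis on localhost:6379; the typename, payload, and option values are illustrative and stand in for the README's `tasks` helpers:

```go
package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Illustrative typename and payload; the README builds these via helpers.
	task := asynq.NewTask("image:resize", []byte(`{"source_url":"https://example.com/myassets/image.jpg"}`))

	info, err := client.Enqueue(
		task,
		asynq.Queue("critical"),         // route to the "critical" queue
		asynq.MaxRetry(10),              // retry up to 10 times on failure
		asynq.Timeout(3*time.Minute),    // give each attempt three minutes
		asynq.ProcessIn(30*time.Second), // start processing 30 seconds from now
	)
	if err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
}
```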
client.go (13 changed lines)
@@ -93,10 +93,8 @@ func (n retryOption) Type() OptionType { return MaxRetryOpt }
 func (n retryOption) Value() interface{} { return int(n) }
 
 // Queue returns an option to specify the queue to enqueue the task into.
-//
-// Queue name is case-insensitive and the lowercased version is used.
 func Queue(qname string) Option {
-	return queueOption(strings.ToLower(qname))
+	return queueOption(qname)
 }
 
 func (qname queueOption) String() string { return fmt.Sprintf("Queue(%q)", string(qname)) }
@@ -207,11 +205,11 @@ func composeOptions(opts ...Option) (option, error) {
 		case retryOption:
 			res.retry = int(opt)
 		case queueOption:
-			trimmed := strings.TrimSpace(string(opt))
-			if err := base.ValidateQueueName(trimmed); err != nil {
+			qname := string(opt)
+			if err := base.ValidateQueueName(qname); err != nil {
 				return option{}, err
 			}
-			res.queue = trimmed
+			res.queue = qname
 		case timeoutOption:
 			res.timeout = time.Duration(opt)
 		case deadlineOption:
@@ -269,6 +267,9 @@ func (c *Client) Close() error {
 //
 // If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
 func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
+	if strings.TrimSpace(task.Type()) == "" {
+		return nil, fmt.Errorf("task typename cannot be empty")
+	}
 	c.mu.Lock()
 	if defaults, ok := c.opts[task.Type()]; ok {
 		opts = append(defaults, opts...)
@@ -287,13 +287,13 @@ func TestClientEnqueue(t *testing.T) {
 			},
 		},
 		{
-			desc: "Queue option should be case-insensitive",
+			desc: "Queue option should be case sensitive",
 			task: task,
 			opts: []Option{
-				Queue("HIGH"),
+				Queue("MyQueue"),
 			},
 			wantInfo: &TaskInfo{
-				Queue:   "high",
+				Queue:   "MyQueue",
 				Type:    task.Type(),
 				Payload: task.Payload(),
 				State:   TaskStatePending,
@@ -306,12 +306,12 @@ func TestClientEnqueue(t *testing.T) {
 				NextProcessAt: now,
 			},
 			wantPending: map[string][]*base.TaskMessage{
-				"high": {
+				"MyQueue": {
 					{
 						Type:     task.Type(),
 						Payload:  task.Payload(),
 						Retry:    defaultMaxRetry,
-						Queue:    "high",
+						Queue:    "MyQueue",
 						Timeout:  int64(defaultTimeout.Seconds()),
 						Deadline: noDeadline.Unix(),
 					},
@@ -585,6 +585,16 @@ func TestClientEnqueueError(t *testing.T) {
 				Queue(""),
 			},
 		},
+		{
+			desc: "With empty task typename",
+			task: NewTask("", h.JSON(map[string]interface{}{})),
+			opts: []Option{},
+		},
+		{
+			desc: "With blank task typename",
+			task: NewTask(" ", h.JSON(map[string]interface{}{})),
+			opts: []Option{},
+		},
 	}
 
 	for _, tc := range tests {
@@ -50,6 +50,7 @@ type QueueInfo struct {
 	Queue string
 
 	// Total number of bytes that the queue and its tasks require to be stored in redis.
+	// It is an approximate memory usage value in bytes since the value is computed by sampling.
 	MemoryUsage int64
 
 	// Size is the total number of tasks in the queue.
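A hedged sketch of reading the approximated value through the inspector API, assuming Redis on localhost:6379, a queue named `default`, and the `Inspector.GetQueueInfo` accessor available in this release:

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})

	// MemoryUsage is computed by sampling, so treat it as an estimate
	// rather than an exact byte count.
	info, err := inspector.GetQueueInfo("default")
	if err != nil {
		log.Fatalf("could not get queue info: %v", err)
	}
	log.Printf("queue=%s approx memory usage=%d bytes", info.Queue, info.MemoryUsage)
}
```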
@@ -10,6 +10,7 @@ import (
 	"crypto/md5"
 	"encoding/hex"
 	"fmt"
+	"strings"
 	"sync"
 	"time"
 
@@ -22,7 +23,7 @@ import (
 )
 
 // Version of asynq library and CLI.
-const Version = "0.18.0"
+const Version = "0.18.3"
 
 // DefaultQueueName is the queue name used if none are specified by user.
 const DefaultQueueName = "default"
@@ -85,7 +86,7 @@ func TaskStateFromString(s string) (TaskState, error) {
 // ValidateQueueName validates a given qname to be used as a queue name.
 // Returns nil if valid, otherwise returns non-nil error.
 func ValidateQueueName(qname string) error {
-	if len(qname) == 0 {
+	if len(strings.TrimSpace(qname)) == 0 {
 		return fmt.Errorf("queue name must contain one or more characters")
 	}
 	return nil
@@ -26,7 +26,8 @@ type Stats struct {
 	// Name of the queue (e.g. "default", "critical").
 	Queue string
 	// MemoryUsage is the total number of bytes the queue and its tasks require
-	// to be stored in redis.
+	// to be stored in redis. It is an approximate memory usage value in bytes
+	// since the value is computed by sampling.
 	MemoryUsage int64
 	// Paused indicates whether the queue is paused.
 	// If true, tasks in the queue should not be processed.
@@ -172,31 +173,82 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
 	return stats, nil
 }
 
+// Computes memory usage for the given queue by sampling tasks
+// from each redis list/zset. Returns approximate memory usage value
+// in bytes.
+//
+// KEYS[1] -> asynq:{qname}:active
+// KEYS[2] -> asynq:{qname}:pending
+// KEYS[3] -> asynq:{qname}:scheduled
+// KEYS[4] -> asynq:{qname}:retry
+// KEYS[5] -> asynq:{qname}:archived
+//
+// ARGV[1] -> asynq:{qname}:t:
+// ARGV[2] -> sample_size (e.g 20)
+var memoryUsageCmd = redis.NewScript(`
+local sample_size = tonumber(ARGV[2])
+if sample_size <= 0 then
+	return redis.error_reply("sample size must be a positive number")
+end
+local memusg = 0
+for i=1,2 do
+	local ids = redis.call("LRANGE", KEYS[i], 0, sample_size - 1)
+	local sample_total = 0
+	if (table.getn(ids) > 0) then
+		for _, id in ipairs(ids) do
+			local bytes = redis.call("MEMORY", "USAGE", ARGV[1] .. id)
+			sample_total = sample_total + bytes
+		end
+		local n = redis.call("LLEN", KEYS[i])
+		local avg = sample_total / table.getn(ids)
+		memusg = memusg + (avg * n)
+	end
+	local m = redis.call("MEMORY", "USAGE", KEYS[i])
+	if (m) then
+		memusg = memusg + m
+	end
+end
+for i=3,5 do
+	local ids = redis.call("ZRANGE", KEYS[i], 0, sample_size - 1)
+	local sample_total = 0
+	if (table.getn(ids) > 0) then
+		for _, id in ipairs(ids) do
+			local bytes = redis.call("MEMORY", "USAGE", ARGV[1] .. id)
+			sample_total = sample_total + bytes
+		end
+		local n = redis.call("ZCARD", KEYS[i])
+		local avg = sample_total / table.getn(ids)
+		memusg = memusg + (avg * n)
+	end
+	local m = redis.call("MEMORY", "USAGE", KEYS[i])
+	if (m) then
+		memusg = memusg + m
+	end
+end
+return memusg
+`)
+
 func (r *RDB) memoryUsage(qname string) (int64, error) {
 	var op errors.Op = "rdb.memoryUsage"
-	var (
-		keys   []string
-		data   []string
-		cursor uint64
-		err    error
-	)
-	for {
-		data, cursor, err = r.client.Scan(cursor, fmt.Sprintf("asynq:{%s}*", qname), 100).Result()
-		if err != nil {
-			return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "scan", Err: err})
-		}
-		keys = append(keys, data...)
-		if cursor == 0 {
-			break
-		}
-	}
-	var usg int64
-	for _, k := range keys {
-		n, err := r.client.MemoryUsage(k).Result()
-		if err != nil {
-			return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "memory usage", Err: err})
-		}
-		usg += n
+	const sampleSize = 20
+	keys := []string{
+		base.ActiveKey(qname),
+		base.PendingKey(qname),
+		base.ScheduledKey(qname),
+		base.RetryKey(qname),
+		base.ArchivedKey(qname),
+	}
+	argv := []interface{}{
+		base.TaskKeyPrefix(qname),
+		sampleSize,
+	}
+	res, err := memoryUsageCmd.Run(r.client, keys, argv...).Result()
+	if err != nil {
+		return 0, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
+	}
+	usg, err := cast.ToInt64E(res)
+	if err != nil {
+		return 0, errors.E(op, errors.Internal, fmt.Sprintf("could not cast script return value to int64"))
 	}
 	return usg, nil
 }
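The script estimates per-queue memory by averaging `MEMORY USAGE` over a sample of at most `sample_size` tasks, scaling by the number of tasks in each list or zset, and adding the usage of the key itself (presumably why the README hunk above bumps the Redis requirement to 4.0, where `MEMORY USAGE` first appeared). A small standalone sketch of the same arithmetic, with made-up numbers purely for illustration:

```go
package main

import "fmt"

// approxQueueUsage mirrors the estimate in the Lua script above: average the
// sampled task sizes, scale by the total task count, then add the overhead of
// the queue key itself.
func approxQueueUsage(sampledTaskBytes []int64, totalTasks, queueKeyBytes int64) int64 {
	if len(sampledTaskBytes) == 0 {
		return queueKeyBytes
	}
	var sampleTotal int64
	for _, b := range sampledTaskBytes {
		sampleTotal += b
	}
	avg := sampleTotal / int64(len(sampledTaskBytes))
	return avg*totalTasks + queueKeyBytes
}

func main() {
	// 20 sampled tasks averaging ~600 bytes, 10,000 tasks in the queue,
	// plus the memory used by the pending list key itself.
	sample := make([]int64, 20)
	for i := range sample {
		sample[i] = 600
	}
	fmt.Println(approxQueueUsage(sample, 10000, 450_000)) // 6450000 bytes, an estimate
}
```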
recoverer.go (37 changed lines)
@@ -57,6 +57,7 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
+		r.recover()
 		timer := time.NewTimer(r.interval)
 		for {
 			select {
@@ -65,27 +66,31 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
 				timer.Stop()
 				return
 			case <-timer.C:
-				// Get all tasks which have expired 30 seconds ago or earlier.
-				deadline := time.Now().Add(-30 * time.Second)
-				msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...)
-				if err != nil {
-					r.logger.Warn("recoverer: could not list deadline exceeded tasks")
-					continue
-				}
-				const errMsg = "deadline exceeded" // TODO: better error message
-				for _, msg := range msgs {
-					if msg.Retried >= msg.Retry {
-						r.archive(msg, errMsg)
-					} else {
-						r.retry(msg, errMsg)
-					}
-				}
+				r.recover()
+				timer.Reset(r.interval)
 			}
 		}
 	}()
 }
 
+func (r *recoverer) recover() {
+	// Get all tasks which have expired 30 seconds ago or earlier.
+	deadline := time.Now().Add(-30 * time.Second)
+	msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...)
+	if err != nil {
+		r.logger.Warn("recoverer: could not list deadline exceeded tasks")
+		return
+	}
+	const errMsg = "deadline exceeded"
+	for _, msg := range msgs {
+		if msg.Retried >= msg.Retry {
+			r.archive(msg, errMsg)
+		} else {
+			r.retry(msg, errMsg)
+		}
+	}
+}
+
 func (r *recoverer) retry(msg *base.TaskMessage, errMsg string) {
 	delay := r.retryDelayFunc(msg.Retried, fmt.Errorf(errMsg), NewTask(msg.Type, msg.Payload))
 	retryAt := time.Now().Add(delay)
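The recoverer change applies a common Go pattern: run the periodic job once at startup and again on every tick, so a restarted server does not wait a full interval before recovering tasks. A generic sketch of the pattern, independent of asynq (all names illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// runEvery runs job immediately, then once per interval until done is closed.
// This mirrors the recoverer change: the work happens at startup instead of
// waiting for the first timer tick.
func runEvery(interval time.Duration, done <-chan struct{}, job func()) {
	job() // run once up front
	timer := time.NewTimer(interval)
	defer timer.Stop()
	for {
		select {
		case <-done:
			return
		case <-timer.C:
			job()
			timer.Reset(interval)
		}
	}
}

func main() {
	done := make(chan struct{})
	go runEvery(50*time.Millisecond, done, func() { fmt.Println("recover pass") })
	time.Sleep(120 * time.Millisecond)
	close(done)
}
```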
@@ -98,7 +98,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
 	mux.mu.Lock()
 	defer mux.mu.Unlock()
 
-	if pattern == "" {
+	if strings.TrimSpace(pattern) == "" {
 		panic("asynq: invalid pattern")
 	}
 	if handler == nil {
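`ServeMux.Handle` is the registration entry point used when wiring up a worker, so with this change a whitespace-only pattern now panics at registration just like an empty one. A minimal sketch of typical registration, assuming Redis on localhost:6379; the typename and handler are illustrative:

```go
package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func handleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error {
	log.Printf("delivering email for task %q", t.Type())
	return nil
}

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		asynq.Config{Concurrency: 10},
	)

	mux := asynq.NewServeMux()
	// With this change, a whitespace-only pattern such as "  " panics here,
	// just like the empty pattern always has.
	mux.HandleFunc("email:deliver", handleEmailDeliveryTask)

	if err := srv.Run(mux); err != nil {
		log.Fatalf("could not run server: %v", err)
	}
}
```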
@@ -295,6 +295,9 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 	}
 	queues := make(map[string]int)
 	for qname, p := range cfg.Queues {
+		if err := base.ValidateQueueName(qname); err != nil {
+			continue // ignore invalid queue names
+		}
 		if p > 0 {
 			queues[qname] = p
 		}
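A hedged sketch of what this validation means for `Config.Queues`: entries whose names fail `ValidateQueueName` (empty or blank after trimming) are skipped, as are entries with non-positive weights. The names and weights below are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hibiken/asynq"
)

func main() {
	cfg := asynq.Config{
		Queues: map[string]int{
			"critical": 6, // kept
			"default":  3, // kept
			"   ":      2, // dropped: blank name fails ValidateQueueName
			"low":      0, // dropped: non-positive weight
		},
	}
	srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "localhost:6379"}, cfg)
	fmt.Println("server configured; only the valid queues are polled")
	_ = srv
}
```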