Mirror of https://github.com/hibiken/asynq.git (synced 2025-09-17 12:20:07 +08:00)

Compare commits

14 Commits
- c197902dc0
- e6355bf3f5
- 95c90a5cb8
- 6817af366a
- 4bce28d677
- 73f930313c
- bff2a05d59
- 684a7e0c98
- 46b23d6495
- c0ae62499f
- 7744ade362
- f532c95394
- ff6768f9bb
- d5e9f3b1bd
CHANGELOG.md (29 lines changed)
@@ -7,7 +7,34 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

## [0.18.2] - 2021-06-29
## [0.18.3] - 2021-08-09

### Changed

- `Client.Enqueue` no longer enqueues tasks with an empty typename; an error is returned instead (see the sketch after this section).

## [0.18.2] - 2021-07-15

### Changed

- Changed the `Queue` function to no longer convert the provided queue name to lowercase. Queue names are now case-sensitive.
- `QueueInfo.MemoryUsage` is now an approximate usage value.

### Fixed

- Fixed latency issue around memory usage (see https://github.com/hibiken/asynq/issues/309).

## [0.18.1] - 2021-07-04

### Changed

- Changed to execute task recovering logic when the server starts up; previously it needed to wait for a minute for the task recovering logic to execute.

### Fixed

- Fixed task recovering logic to execute every minute.

## [0.18.0] - 2021-06-29

### Changed
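To illustrate the empty-typename change listed above, here is a minimal sketch (the Redis address and payload are illustrative); the error message shown in the comment matches the one added in `client.go` later in this comparison:

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	// Redis address is illustrative.
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// A task whose typename is empty (or only whitespace) is no longer enqueued.
	task := asynq.NewTask("", nil)
	if _, err := client.Enqueue(task); err != nil {
		log.Printf("enqueue rejected: %v", err) // "task typename cannot be empty"
	}

	// A task with a proper typename is enqueued as before.
	task = asynq.NewTask("email:deliver", []byte(`{"user_id": 42}`))
	info, err := client.Enqueue(task)
	if err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
}
```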
README.md (28 lines changed)
@@ -26,11 +26,10 @@ Task queues are used as a mechanism to distribute work across multiple machines.

- Guaranteed [at least one execution](https://www.cloudcomputingpatterns.org/at_least_once_delivery/) of a task
- Scheduling of tasks
- Durability since tasks are written to Redis
- [Retries](https://github.com/hibiken/asynq/wiki/Task-Retry) of failed tasks
- Automatic recovery of tasks in the event of a worker crash
- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#weighted-priority-queues)
- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Priority-Queues#strict-priority-queues)
- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#weighted-priority)
- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#strict-priority)
- Low latency to add a task since writes are fast in Redis
- De-duplication of tasks using [unique option](https://github.com/hibiken/asynq/wiki/Unique-Tasks)
- Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)

@@ -58,7 +57,7 @@ Initialize your project by creating a folder and then running `go mod init githu

go get -u github.com/hibiken/asynq
```

Make sure you're running a Redis server locally or from a [Docker](https://hub.docker.com/_/redis) container. Version `3.0` or higher is required.
Make sure you're running a Redis server locally or from a [Docker](https://hub.docker.com/_/redis) container. Version `4.0` or higher is required.
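The Redis requirement bump above (3.0 to 4.0) matches the new memory-usage sampling shown later in this comparison, which relies on the `MEMORY USAGE` command introduced in Redis 4.0. For a quick local instance, one option is the official Docker image (assuming Docker is installed; the tag below is only an example, any release from 4.0 up works):

```sh
docker run --rm --name asynq-redis -p 6379:6379 redis:6.2-alpine
```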
Next, write a package that encapsulates task creation and task handling.
@@ -92,7 +91,7 @@ type ImageResizePayload struct {

//----------------------------------------------

func NewEmailDeliveryTask(userID int, tmplID string) (*asynq.Task, error) {
    payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: templID})
    payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: tmplID})
    if err != nil {
        return nil, err
    }

@@ -120,7 +119,7 @@ func HandleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error {
    if err := json.Unmarshal(t.Payload(), &p); err != nil {
        return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
    }
    log.Printf("Sending Email to User: user_id = %d, template_id = %s\n", p.UserID, p.TemplateID)
    log.Printf("Sending Email to User: user_id=%d, template_id=%s", p.UserID, p.TemplateID)
    // Email delivery code ...
    return nil
}

@@ -130,28 +129,27 @@ type ImageProcessor struct {
    // ... fields for struct
}

func (p *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
func (processor *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
    var p ImageResizePayload
    if err := json.Unmarshal(t.Payload(), &p); err != nil {
        return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
    }
    log.Printf("Resizing image: src = %s\n", p.SourceURL)
    log.Printf("Resizing image: src=%s", p.SourceURL)
    // Image resizing code ...
    return nil
}

func NewImageProcessor() *ImageProcessor {
    // ... return an instance
    return &ImageProcessor{}
}
```

In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on the queue.
In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on queues.

```go
package main

import (
    "fmt"
    "log"
    "time"

@@ -178,7 +176,7 @@ func main() {
    if err != nil {
        log.Fatalf("could not enqueue task: %v", err)
    }
    fmt.Printf("enqueued task: id=%s queue=%s\n", info.ID, info.Queue)
    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)

    // ------------------------------------------------------------

@@ -190,7 +188,7 @@ func main() {
    if err != nil {
        log.Fatalf("could not schedule task: %v", err)
    }
    fmt.Printf("enqueued task: id=%s queue=%s\n", info.ID, info.Queue)
    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)

    // ----------------------------------------------------------------------------

@@ -208,7 +206,7 @@ func main() {
    if err != nil {
        log.Fatalf("could not enqueue task: %v", err)
    }
    fmt.Printf("enqueued task: id=%s queue=%s\n", info.ID, info.Queue)
    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)

    // ---------------------------------------------------------------------------
    // Example 4: Pass options to tune task processing behavior at enqueue time.

@@ -219,7 +217,7 @@ func main() {
    if err != nil {
        log.Fatalf("could not enqueue task: %v", err)
    }
    fmt.Printf("enqueued task: id=%s queue=%s\n", info.ID, info.Queue)
    log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
}
```
client.go (13 lines changed)
@@ -93,10 +93,8 @@ func (n retryOption) Type() OptionType { return MaxRetryOpt }
func (n retryOption) Value() interface{} { return int(n) }

// Queue returns an option to specify the queue to enqueue the task into.
//
// Queue name is case-insensitive and the lowercased version is used.
func Queue(qname string) Option {
    return queueOption(strings.ToLower(qname))
    return queueOption(qname)
}

func (qname queueOption) String() string { return fmt.Sprintf("Queue(%q)", string(qname)) }

@@ -207,11 +205,11 @@ func composeOptions(opts ...Option) (option, error) {
    case retryOption:
        res.retry = int(opt)
    case queueOption:
        trimmed := strings.TrimSpace(string(opt))
        if err := base.ValidateQueueName(trimmed); err != nil {
        qname := string(opt)
        if err := base.ValidateQueueName(qname); err != nil {
            return option{}, err
        }
        res.queue = trimmed
        res.queue = qname
    case timeoutOption:
        res.timeout = time.Duration(opt)
    case deadlineOption:

@@ -269,6 +267,9 @@ func (c *Client) Close() error {
//
// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
    if strings.TrimSpace(task.Type()) == "" {
        return nil, fmt.Errorf("task typename cannot be empty")
    }
    c.mu.Lock()
    if defaults, ok := c.opts[task.Type()]; ok {
        opts = append(defaults, opts...)
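A minimal sketch of the `Queue` option change above (the connection details are illustrative): the queue name is now used verbatim instead of being lowercased, so any worker-side queue configuration has to use the exact same casing.

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	// Redis address is illustrative.
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	task := asynq.NewTask("image:resize", []byte(`{"source_url": "https://example.com/img.jpg"}`))

	// Before this change, Queue("MyQueue") was lowercased to "myqueue".
	// Now the task lands in a queue literally named "MyQueue".
	info, err := client.Enqueue(task, asynq.Queue("MyQueue"))
	if err != nil {
		log.Fatalf("could not enqueue task: %v", err)
	}
	log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue) // queue=MyQueue
}
```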
@@ -287,13 +287,13 @@ func TestClientEnqueue(t *testing.T) {
        },
    },
    {
        desc: "Queue option should be case-insensitive",
        desc: "Queue option should be case sensitive",
        task: task,
        opts: []Option{
            Queue("HIGH"),
            Queue("MyQueue"),
        },
        wantInfo: &TaskInfo{
            Queue:   "high",
            Queue:   "MyQueue",
            Type:    task.Type(),
            Payload: task.Payload(),
            State:   TaskStatePending,

@@ -306,12 +306,12 @@ func TestClientEnqueue(t *testing.T) {
            NextProcessAt: now,
        },
        wantPending: map[string][]*base.TaskMessage{
            "high": {
            "MyQueue": {
                {
                    Type:     task.Type(),
                    Payload:  task.Payload(),
                    Retry:    defaultMaxRetry,
                    Queue:    "high",
                    Queue:    "MyQueue",
                    Timeout:  int64(defaultTimeout.Seconds()),
                    Deadline: noDeadline.Unix(),
                },

@@ -585,6 +585,16 @@ func TestClientEnqueueError(t *testing.T) {
            Queue(""),
        },
    },
    {
        desc: "With empty task typename",
        task: NewTask("", h.JSON(map[string]interface{}{})),
        opts: []Option{},
    },
    {
        desc: "With blank task typename",
        task: NewTask(" ", h.JSON(map[string]interface{}{})),
        opts: []Option{},
    },
}

for _, tc := range tests {
@@ -50,6 +50,7 @@ type QueueInfo struct {
    Queue string

    // Total number of bytes that the queue and its tasks require to be stored in redis.
    // It is an approximate memory usage value in bytes since the value is computed by sampling.
    MemoryUsage int64

    // Size is the total number of tasks in the queue.
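Since `MemoryUsage` is the field whose meaning changes above, here is a minimal sketch of reading it, assuming this version exposes `NewInspector` and `GetQueueInfo` in the `asynq` package (the address and queue name are illustrative):

```go
package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"})

	info, err := inspector.GetQueueInfo("default")
	if err != nil {
		log.Fatal(err)
	}
	// MemoryUsage is now an approximation computed by sampling tasks,
	// not an exact byte count.
	log.Printf("queue=%s approx_memory_usage_bytes=%d", info.Queue, info.MemoryUsage)
}
```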
@@ -10,6 +10,7 @@ import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
    "strings"
    "sync"
    "time"

@@ -22,7 +23,7 @@ import (
)

// Version of asynq library and CLI.
const Version = "0.18.0"
const Version = "0.18.3"

// DefaultQueueName is the queue name used if none are specified by user.
const DefaultQueueName = "default"

@@ -85,7 +86,7 @@ func TaskStateFromString(s string) (TaskState, error) {
// ValidateQueueName validates a given qname to be used as a queue name.
// Returns nil if valid, otherwise returns non-nil error.
func ValidateQueueName(qname string) error {
    if len(qname) == 0 {
    if len(strings.TrimSpace(qname)) == 0 {
        return fmt.Errorf("queue name must contain one or more characters")
    }
    return nil
@@ -26,7 +26,8 @@ type Stats struct {
    // Name of the queue (e.g. "default", "critical").
    Queue string
    // MemoryUsage is the total number of bytes the queue and its tasks require
    // to be stored in redis.
    // to be stored in redis. It is an approximate memory usage value in bytes
    // since the value is computed by sampling.
    MemoryUsage int64
    // Paused indicates whether the queue is paused.
    // If true, tasks in the queue should not be processed.

@@ -172,31 +173,82 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
    return stats, nil
}

// Computes memory usage for the given queue by sampling tasks
// from each redis list/zset. Returns approximate memory usage value
// in bytes.
//
// KEYS[1] -> asynq:{qname}:active
// KEYS[2] -> asynq:{qname}:pending
// KEYS[3] -> asynq:{qname}:scheduled
// KEYS[4] -> asynq:{qname}:retry
// KEYS[5] -> asynq:{qname}:archived
//
// ARGV[1] -> asynq:{qname}:t:
// ARGV[2] -> sample_size (e.g 20)
var memoryUsageCmd = redis.NewScript(`
local sample_size = tonumber(ARGV[2])
if sample_size <= 0 then
    return redis.error_reply("sample size must be a positive number")
end
local memusg = 0
for i=1,2 do
    local ids = redis.call("LRANGE", KEYS[i], 0, sample_size - 1)
    local sample_total = 0
    if (table.getn(ids) > 0) then
        for _, id in ipairs(ids) do
            local bytes = redis.call("MEMORY", "USAGE", ARGV[1] .. id)
            sample_total = sample_total + bytes
        end
        local n = redis.call("LLEN", KEYS[i])
        local avg = sample_total / table.getn(ids)
        memusg = memusg + (avg * n)
    end
    local m = redis.call("MEMORY", "USAGE", KEYS[i])
    if (m) then
        memusg = memusg + m
    end
end
for i=3,5 do
    local ids = redis.call("ZRANGE", KEYS[i], 0, sample_size - 1)
    local sample_total = 0
    if (table.getn(ids) > 0) then
        for _, id in ipairs(ids) do
            local bytes = redis.call("MEMORY", "USAGE", ARGV[1] .. id)
            sample_total = sample_total + bytes
        end
        local n = redis.call("ZCARD", KEYS[i])
        local avg = sample_total / table.getn(ids)
        memusg = memusg + (avg * n)
    end
    local m = redis.call("MEMORY", "USAGE", KEYS[i])
    if (m) then
        memusg = memusg + m
    end
end
return memusg
`)
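To put rough numbers on the sampling logic above (the figures are illustrative): if the pending list holds 5,000 task IDs and the 20 sampled task keys report a combined `MEMORY USAGE` of 10,240 bytes, the per-task average is 512 bytes, so the script attributes 512 × 5,000 = 2,560,000 bytes to that list, adds the `MEMORY USAGE` of the list key itself, and repeats the same estimate for the other four keys before returning the sum as `memusg`.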
func (r *RDB) memoryUsage(qname string) (int64, error) {
    var op errors.Op = "rdb.memoryUsage"
    var (
        keys   []string
        data   []string
        cursor uint64
        err    error
    )
    for {
        data, cursor, err = r.client.Scan(cursor, fmt.Sprintf("asynq:{%s}*", qname), 100).Result()
        if err != nil {
            return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "scan", Err: err})
        }
        keys = append(keys, data...)
        if cursor == 0 {
            break
        }
    const sampleSize = 20
    keys := []string{
        base.ActiveKey(qname),
        base.PendingKey(qname),
        base.ScheduledKey(qname),
        base.RetryKey(qname),
        base.ArchivedKey(qname),
    }
    var usg int64
    for _, k := range keys {
        n, err := r.client.MemoryUsage(k).Result()
        if err != nil {
            return 0, errors.E(op, errors.Unknown, &errors.RedisCommandError{Command: "memory usage", Err: err})
        }
        usg += n
    argv := []interface{}{
        base.TaskKeyPrefix(qname),
        sampleSize,
    }
    res, err := memoryUsageCmd.Run(r.client, keys, argv...).Result()
    if err != nil {
        return 0, errors.E(op, errors.Unknown, fmt.Sprintf("redis eval error: %v", err))
    }
    usg, err := cast.ToInt64E(res)
    if err != nil {
        return 0, errors.E(op, errors.Internal, fmt.Sprintf("could not cast script return value to int64"))
    }
    return usg, nil
}
recoverer.go (37 lines changed)
@@ -57,6 +57,7 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
    wg.Add(1)
    go func() {
        defer wg.Done()
        r.recover()
        timer := time.NewTimer(r.interval)
        for {
            select {

@@ -65,27 +66,31 @@ func (r *recoverer) start(wg *sync.WaitGroup) {
                timer.Stop()
                return
            case <-timer.C:
                // Get all tasks which have expired 30 seconds ago or earlier.
                deadline := time.Now().Add(-30 * time.Second)
                msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...)
                if err != nil {
                    r.logger.Warn("recoverer: could not list deadline exceeded tasks")
                    continue
                }
                const errMsg = "deadline exceeded" // TODO: better error message
                for _, msg := range msgs {
                    if msg.Retried >= msg.Retry {
                        r.archive(msg, errMsg)
                    } else {
                        r.retry(msg, errMsg)
                    }
                }
                r.recover()
                timer.Reset(r.interval)
            }
        }
    }()
}

func (r *recoverer) recover() {
    // Get all tasks which have expired 30 seconds ago or earlier.
    deadline := time.Now().Add(-30 * time.Second)
    msgs, err := r.broker.ListDeadlineExceeded(deadline, r.queues...)
    if err != nil {
        r.logger.Warn("recoverer: could not list deadline exceeded tasks")
        return
    }
    const errMsg = "deadline exceeded"
    for _, msg := range msgs {
        if msg.Retried >= msg.Retry {
            r.archive(msg, errMsg)
        } else {
            r.retry(msg, errMsg)
        }
    }
}

func (r *recoverer) retry(msg *base.TaskMessage, errMsg string) {
    delay := r.retryDelayFunc(msg.Retried, fmt.Errorf(errMsg), NewTask(msg.Type, msg.Payload))
    retryAt := time.Now().Add(delay)
@@ -98,7 +98,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
    mux.mu.Lock()
    defer mux.mu.Unlock()

    if pattern == "" {
    if strings.TrimSpace(pattern) == "" {
        panic("asynq: invalid pattern")
    }
    if handler == nil {
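To illustrate the `ServeMux.Handle` change above (the handler body and pattern names are illustrative): a pattern consisting only of whitespace is now rejected the same way as an empty one.

```go
package main

import (
	"context"

	"github.com/hibiken/asynq"
)

func main() {
	mux := asynq.NewServeMux()
	noop := asynq.HandlerFunc(func(ctx context.Context, t *asynq.Task) error { return nil })

	mux.Handle("email:deliver", noop) // registers normally
	mux.Handle("   ", noop)           // now panics: "asynq: invalid pattern"
}
```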