mirror of https://github.com/hibiken/asynq.git (synced 2024-12-26 07:42:17 +08:00)
Merge pull request #2 from hibiken/feature/lua
Use Lua script to make multiple redis commands atomic
This commit is contained in: commit d35d345e2d
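For context, the change below leans on go-redis/v7's Script type: redis.NewScript wraps a Lua program and Run executes it server-side (EVALSHA, falling back to EVAL), so all Redis commands inside the script apply as one atomic step. A minimal sketch, separate from this commit; the key name is illustrative only:

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Increment a counter and return the new value in a single atomic script.
	// "example:counter" is an illustrative key, not one used by asynq.
	script := redis.NewScript(`
	local n = redis.call("INCR", KEYS[1])
	return n
	`)
	n, err := script.Run(client, []string{"example:counter"}).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(n)
}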
@@ -37,7 +37,7 @@ func (c *Client) Process(task *Task, executeAt time.Time) error {
// enqueue pushes a given task to the specified queue.
func (c *Client) enqueue(msg *taskMessage, executeAt time.Time) error {
	if time.Now().After(executeAt) {
		return c.rdb.push(msg)
		return c.rdb.enqueue(msg)
	}
	return c.rdb.zadd(scheduled, float64(executeAt.Unix()), msg)
}

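The hunk above is the client's scheduling decision: a task whose executeAt is now or in the past is enqueued immediately, while a future executeAt stores the message in the scheduled sorted set, scored by its Unix timestamp. A hypothetical call site (c and task are assumed to be a *Client and a *Task constructed elsewhere; not code from this commit):

// Immediate: executeAt is not in the future, so the message goes straight onto its queue list.
if err := c.Process(task, time.Now()); err != nil {
	// handle error
}

// Deferred: a future executeAt stores the message in the "scheduled" sorted set,
// scored by executeAt.Unix(), for the poller to move onto the queue once due.
if err := c.Process(task, time.Now().Add(24*time.Hour)); err != nil {
	// handle error
}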
poller.go (20 lines changed)
@@ -3,10 +3,7 @@ package asynq
import (
	"fmt"
	"log"
	"strconv"
	"time"

	"github.com/go-redis/redis/v7"
)

type poller struct {
@@ -56,21 +53,8 @@ func (p *poller) start() {

func (p *poller) exec() {
	for _, zset := range p.zsets {
		// Get next items in the queue with scores (time to execute) <= now.
		now := time.Now().Unix()
		msgs, err := p.rdb.zRangeByScore(zset, &redis.ZRangeBy{Min: "-inf", Max: strconv.Itoa(int(now))})
		if err != nil {
			log.Printf("radis command ZRANGEBYSCORE failed: %v\n", err)
			continue
		}
		fmt.Printf("[DEBUG] got %d tasks from %q\n", len(msgs), zset)

		for _, m := range msgs {
			// TODO(hibiken): Make this move operation atomic.
			if err := p.rdb.move(zset, m); err != nil {
				log.Printf("could not move task %+v to queue %q: %v", m, m.Queue, err)
				continue
			}
		if err := p.rdb.forward(zset); err != nil {
			log.Printf("[ERROR] could not forward scheduled tasks from %q: %v", zset, err)
		}
	}
}

rdb.go (103 lines changed)
@@ -4,7 +4,6 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"strconv"
	"time"

@@ -28,7 +27,7 @@ var (
	errDeserializeTask = errors.New("could not decode task message from json")
)

// rdb encapsulates the interaction with redis server.
// rdb encapsulates the interactions with redis server.
type rdb struct {
	client *redis.Client
}
@@ -37,28 +36,28 @@ func newRDB(client *redis.Client) *rdb {
	return &rdb{client}
}

// push enqueues the task to queue.
func (r *rdb) push(msg *taskMessage) error {
// enqueue inserts the given task to the end of the queue.
// It also adds the queue name to the "all-queues" list.
func (r *rdb) enqueue(msg *taskMessage) error {
	bytes, err := json.Marshal(msg)
	if err != nil {
		return fmt.Errorf("could not encode task into JSON: %v", err)
	}
	qname := queuePrefix + msg.Queue
	err = r.client.SAdd(allQueues, qname).Err()
	pipe := r.client.Pipeline()
	pipe.SAdd(allQueues, qname)
	pipe.LPush(qname, string(bytes))
	_, err = pipe.Exec()
	if err != nil {
		return fmt.Errorf("command SADD %q %q failed: %v",
			allQueues, qname, err)
	}
	err = r.client.LPush(qname, string(bytes)).Err()
	if err != nil {
		return fmt.Errorf("command RPUSH %q %q failed: %v",
			qname, string(bytes), err)
		return fmt.Errorf("could not enqueue task %+v to %q: %v",
			msg, qname, err)
	}
	return nil
}
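A note on the enqueue change above: Pipeline batches SADD and LPUSH into one round trip, but unlike a Lua script it is not atomic; other clients' commands can interleave between the two writes, and one can fail while the other succeeds. go-redis also offers TxPipeline, which wraps the queued commands in MULTI/EXEC so they apply as a unit. A sketch of that variant, reusing the same variables as enqueue above (not what this commit does):

	// Sketch only: the same two writes wrapped in MULTI/EXEC via TxPipeline.
	pipe := r.client.TxPipeline()
	pipe.SAdd(allQueues, qname)
	pipe.LPush(qname, string(bytes))
	if _, err := pipe.Exec(); err != nil {
		return fmt.Errorf("could not enqueue task %+v to %q: %v", msg, qname, err)
	}
	return nil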

// dequeue blocks until there is a taskMessage available to be processed,
// once available, it adds the task to "in progress" list and returns the task.
// dequeue blocks until there is a task available to be processed,
// once a task is available, it adds the task to "in progress" list
// and returns the task.
func (r *rdb) dequeue(qname string, timeout time.Duration) (*taskMessage, error) {
	data, err := r.client.BRPopLPush(qname, inProgress, timeout).Result()
	if err != nil {
@@ -103,45 +102,6 @@ func (r *rdb) zadd(zset string, zscore float64, msg *taskMessage) error {
	return nil
}

func (r *rdb) zRangeByScore(key string, opt *redis.ZRangeBy) ([]*taskMessage, error) {
	jobs, err := r.client.ZRangeByScore(key, opt).Result()
	if err != nil {
		return nil, fmt.Errorf("command ZRANGEBYSCORE %s %v failed: %v", key, opt, err)
	}
	var msgs []*taskMessage
	for _, j := range jobs {
		fmt.Printf("[debug] j = %v\n", j)
		var msg taskMessage
		err = json.Unmarshal([]byte(j), &msg)
		if err != nil {
			log.Printf("[WARNING] could not unmarshal task data %s: %v\n", j, err)
			continue
		}
		msgs = append(msgs, &msg)
	}
	return msgs, nil
}

// move moves taskMessage from zfrom to the specified queue.
func (r *rdb) move(from string, msg *taskMessage) error {
	// TODO(hibiken): Lua script, make this atomic.
	bytes, err := json.Marshal(msg)
	if err != nil {
		return errSerializeTask
	}
	if r.client.ZRem(from, string(bytes)).Val() > 0 {
		err = r.push(msg)
		if err != nil {
			log.Printf("[SERVERE ERROR] could not push task to queue %q: %v\n",
				msg.Queue, err)
			// TODO(hibiken): Handle this error properly.
			// Add back to zfrom?
			return fmt.Errorf("could not push task %v from %q: %v", msg, msg.Queue, err)
		}
	}
	return nil
}

const maxDeadTask = 100
const deadExpirationInDays = 90

@@ -175,13 +135,32 @@ func (r *rdb) listQueues() []string {

// moveAll moves all tasks from src list to dst list.
func (r *rdb) moveAll(src, dst string) error {
	// TODO(hibiken): Lua script
	txf := func(tx *redis.Tx) error {
		length := tx.LLen(src).Val()
		for i := 0; i < int(length); i++ {
			tx.RPopLPush(src, dst)
		}
		return nil
	}
	return r.client.Watch(txf, src)
	script := redis.NewScript(`
	local len = redis.call("LLEN", KEYS[1])
	for i = len, 1, -1 do
		redis.call("RPOPLPUSH", KEYS[1], KEYS[2])
	end
	return len
	`)
	_, err := script.Run(r.client, []string{src, dst}).Result()
	return err
}
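The old moveAll drained the list inside an optimistic WATCH transaction; the new version does the same drain in one Lua script, so the LLEN check and the RPOPLPUSH calls cannot interleave with other clients. A hypothetical call site, using the inProgress and defaultQueue keys exercised by the tests below:

	// Sketch only: requeue everything that was in progress, e.g. after a worker restart.
	if err := r.moveAll(inProgress, defaultQueue); err != nil {
		log.Printf("could not move in-progress tasks back to %q: %v", defaultQueue, err)
	}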

// forward moves all tasks with a score less than the current unix time
// from the given zset to the default queue.
// TODO(hibiken): Find a better method name that reflects what this does.
func (r *rdb) forward(from string) error {
	script := redis.NewScript(`
	local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1])
	for _, msg in ipairs(msgs) do
		redis.call("ZREM", KEYS[1], msg)
		redis.call("SADD", KEYS[2], KEYS[3])
		redis.call("LPUSH", KEYS[3], msg)
	end
	return msgs
	`)
	now := float64(time.Now().Unix())
	res, err := script.Run(r.client, []string{from, allQueues, defaultQueue}, now).Result()
	fmt.Printf("[DEBUG] got %d tasks from %q\n", len(res.([]interface{})), from)
	return err
}
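For contrast, this is roughly what forward looks like as separate client-side calls, assembled from the removed zRangeByScore and move helpers above. Between ZRANGEBYSCORE and ZREM a second poller can read the same member, which is exactly the window the Lua script closes. The function name is hypothetical and this is an illustrative sketch, not code from the commit:

func (r *rdb) forwardNonAtomic(from string) error {
	// Read everything that is due...
	max := strconv.FormatInt(time.Now().Unix(), 10)
	msgs, err := r.client.ZRangeByScore(from, &redis.ZRangeBy{Min: "-inf", Max: max}).Result()
	if err != nil {
		return err
	}
	for _, m := range msgs {
		// ...then move each member. Another client can act on the same member
		// between the read above and the ZREM below.
		if r.client.ZRem(from, m).Val() > 0 {
			r.client.SAdd(allQueues, defaultQueue)
			r.client.LPush(defaultQueue, m)
		}
	}
	return nil
}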
rdb_test.go (291 lines changed)
@@ -3,6 +3,7 @@ package asynq
import (
	"encoding/json"
	"math/rand"
	"sort"
	"testing"
	"time"

@@ -17,9 +18,16 @@ func init() {
	rand.Seed(time.Now().UnixNano())
}

var sortStrOpt = cmp.Transformer("SortStr", func(in []string) []string {
	out := append([]string(nil), in...) // Copy input to avoid mutating it
	sort.Strings(out)
	return out
})

// setup connects to a redis database and flush all keys
// before returning an instance of rdb.
func setup() *rdb {
func setup(t *testing.T) *rdb {
	t.Helper()
	client = redis.NewClient(&redis.Options{
		Addr: "localhost:6379",
		DB:   15, // use database 15 to separate from other applications
@@ -31,98 +39,231 @@ func setup() *rdb {
	return newRDB(client)
}

func randomTask(taskType, qname string) *taskMessage {
func randomTask(taskType, qname string, payload map[string]interface{}) *taskMessage {
	return &taskMessage{
		ID:    uuid.New(),
		Type:  taskType,
		Queue: qname,
		Retry: rand.Intn(100),
		ID:      uuid.New(),
		Type:    taskType,
		Queue:   qname,
		Retry:   rand.Intn(100),
		Payload: make(map[string]interface{}),
	}
}

func TestPush(t *testing.T) {
	r := setup()
	msg := randomTask("send_email", "default")

	err := r.push(msg)
	if err != nil {
		t.Fatalf("could not push message to queue: %v", err)
func TestEnqueue(t *testing.T) {
	r := setup(t)
	tests := []struct {
		msg *taskMessage
	}{
		{msg: randomTask("send_email", "default",
			map[string]interface{}{"to": "exampleuser@gmail.com", "from": "noreply@example.com"})},
		{msg: randomTask("generate_csv", "default",
			map[string]interface{}{})},
		{msg: randomTask("sync", "default", nil)},
	}

	res := client.LRange("asynq:queues:default", 0, -1).Val()
	if len(res) != 1 {
		t.Fatalf("len(res) = %d, want %d", len(res), 1)
	}
	bytes, err := json.Marshal(msg)
	if err != nil {
		t.Fatalf("json.Marshal(msg) failed: %v", err)
	}
	if res[0] != string(bytes) {
		t.Fatalf("res[0] = %s, want %s", res[0], string(bytes))
	for _, tc := range tests {
		// clean up db before each test case.
		if err := client.FlushDB().Err(); err != nil {
			t.Fatal(err)
		}
		err := r.enqueue(tc.msg)
		if err != nil {
			t.Error(err)
		}
		res := client.LRange(defaultQueue, 0, -1).Val()
		if len(res) != 1 {
			t.Errorf("LIST %q has length %d, want 1", defaultQueue, len(res))
			continue
		}
		if !client.SIsMember(allQueues, defaultQueue).Val() {
			t.Errorf("SISMEMBER %q %q = false, want true", allQueues, defaultQueue)
		}
		var persisted taskMessage
		if err := json.Unmarshal([]byte(res[0]), &persisted); err != nil {
			t.Error(err)
			continue
		}
		if diff := cmp.Diff(*tc.msg, persisted); diff != "" {
			t.Errorf("persisted data differed from the original input (-want, +got)\n%s", diff)
		}
	}
}

func TestDequeueImmediateReturn(t *testing.T) {
	r := setup()
	msg := randomTask("export_csv", "csv")
	r.push(msg)

	res, err := r.dequeue("asynq:queues:csv", time.Second)
	if err != nil {
		t.Fatalf("r.bpop() failed: %v", err)
func TestDequeue(t *testing.T) {
	r := setup(t)
	t1 := randomTask("send_email", "default", map[string]interface{}{"subject": "hello!"})
	tests := []struct {
		queued     []*taskMessage
		want       *taskMessage
		err        error
		inProgress int64 // length of "in-progress" tasks after dequeue
	}{
		{queued: []*taskMessage{t1}, want: t1, err: nil, inProgress: 1},
		{queued: []*taskMessage{}, want: nil, err: errQueuePopTimeout, inProgress: 0},
	}

	if !cmp.Equal(res, msg) {
		t.Errorf("cmp.Equal(res, msg) = %t, want %t", false, true)
	}
	jobs := client.LRange(inProgress, 0, -1).Val()
	if len(jobs) != 1 {
		t.Fatalf("len(jobs) = %d, want %d", len(jobs), 1)
	}
	var tm taskMessage
	if err := json.Unmarshal([]byte(jobs[0]), &tm); err != nil {
		t.Fatalf("json.Marshal() failed: %v", err)
	}
	if diff := cmp.Diff(res, &tm); diff != "" {
		t.Errorf("cmp.Diff(res, tm) = %s", diff)
	}
}

func TestDequeueTimeout(t *testing.T) {
	r := setup()

	_, err := r.dequeue("asynq:queues:default", time.Second)
	if err != errQueuePopTimeout {
		t.Errorf("err = %v, want %v", err, errQueuePopTimeout)
	for _, tc := range tests {
		// clean up db before each test case.
		if err := client.FlushDB().Err(); err != nil {
			t.Fatal(err)
		}
		for _, m := range tc.queued {
			r.enqueue(m)
		}
		got, err := r.dequeue(defaultQueue, time.Second)
		if !cmp.Equal(got, tc.want) || err != tc.err {
			t.Errorf("(*rdb).dequeue(%q, time.Second) = %v, %v; want %v, %v",
				defaultQueue, got, err, tc.want, tc.err)
			continue
		}
		if l := client.LLen(inProgress).Val(); l != tc.inProgress {
			t.Errorf("LIST %q has length %d, want %d", inProgress, l, tc.inProgress)
		}
	}
}

func TestMoveAll(t *testing.T) {
	r := setup()
	seed := []*taskMessage{
		randomTask("send_email", "default"),
		randomTask("export_csv", "csv"),
		randomTask("sync_stuff", "sync"),
	}
	for _, task := range seed {
		bytes, err := json.Marshal(task)
		if err != nil {
			t.Errorf("json.Marhsal() failed: %v", err)
		}
		if err := client.LPush(inProgress, string(bytes)).Err(); err != nil {
			t.Errorf("LPUSH %q %s failed: %v", inProgress, string(bytes), err)
		}
	}

	err := r.moveAll(inProgress, defaultQueue)
	r := setup(t)
	t1 := randomTask("send_email", "default", nil)
	t2 := randomTask("export_csv", "csv", nil)
	t3 := randomTask("sync_stuff", "sync", nil)
	json1, err := json.Marshal(t1)
	if err != nil {
		t.Errorf("moveAll failed: %v", err)
		t.Fatal(err)
	}
	json2, err := json.Marshal(t2)
	if err != nil {
		t.Fatal(err)
	}
	json3, err := json.Marshal(t3)
	if err != nil {
		t.Fatal(err)
	}

	if l := client.LLen(inProgress).Val(); l != 0 {
		t.Errorf("LLEN %q = %d, want 0", inProgress, l)
	tests := []struct {
		beforeSrc []string
		beforeDst []string
		afterSrc  []string
		afterDst  []string
	}{
		{
			beforeSrc: []string{string(json1), string(json2), string(json3)},
			beforeDst: []string{},
			afterSrc:  []string{},
			afterDst:  []string{string(json1), string(json2), string(json3)},
		},
		{
			beforeSrc: []string{},
			beforeDst: []string{string(json1), string(json2), string(json3)},
			afterSrc:  []string{},
			afterDst:  []string{string(json1), string(json2), string(json3)},
		},
		{
			beforeSrc: []string{string(json2), string(json3)},
			beforeDst: []string{string(json1)},
			afterSrc:  []string{},
			afterDst:  []string{string(json1), string(json2), string(json3)},
		},
	}
	if l := client.LLen(defaultQueue).Val(); int(l) != len(seed) {
		t.Errorf("LLEN %q = %d, want %d", defaultQueue, l, len(seed))

	for _, tc := range tests {
		// clean up db before each test case.
		if err := client.FlushDB().Err(); err != nil {
			t.Error(err)
			continue
		}
		// seed src list.
		for _, msg := range tc.beforeSrc {
			client.LPush(inProgress, msg)
		}
		// seed dst list.
		for _, msg := range tc.beforeDst {
			client.LPush(defaultQueue, msg)
		}

		if err := r.moveAll(inProgress, defaultQueue); err != nil {
			t.Errorf("(*rdb).moveAll(%q, %q) = %v, want nil", inProgress, defaultQueue, err)
			continue
		}

		gotSrc := client.LRange(inProgress, 0, -1).Val()
		if diff := cmp.Diff(tc.afterSrc, gotSrc, sortStrOpt); diff != "" {
			t.Errorf("mismatch found in %q (-want, +got)\n%s", inProgress, diff)
		}
		gotDst := client.LRange(defaultQueue, 0, -1).Val()
		if diff := cmp.Diff(tc.afterDst, gotDst, sortStrOpt); diff != "" {
			t.Errorf("mismatch found in %q (-want, +got)\n%s", defaultQueue, diff)
		}
	}
}

func TestForward(t *testing.T) {
	r := setup(t)
	t1 := randomTask("send_email", defaultQueue, nil)
	t2 := randomTask("generate_csv", defaultQueue, nil)
	json1, err := json.Marshal(t1)
	if err != nil {
		t.Fatal(err)
	}
	json2, err := json.Marshal(t2)
	if err != nil {
		t.Fatal(err)
	}
	secondAgo := time.Now().Add(-time.Second)
	hourFromNow := time.Now().Add(time.Hour)

	tests := []struct {
		tasks         []*redis.Z // scheduled tasks with timestamp as a score
		wantQueued    []string   // queue after calling forward
		wantScheduled []string   // scheduled queue after calling forward
	}{
		{
			tasks: []*redis.Z{
				&redis.Z{Member: string(json1), Score: float64(secondAgo.Unix())},
				&redis.Z{Member: string(json2), Score: float64(secondAgo.Unix())}},
			wantQueued:    []string{string(json1), string(json2)},
			wantScheduled: []string{},
		},
		{
			tasks: []*redis.Z{
				&redis.Z{Member: string(json1), Score: float64(hourFromNow.Unix())},
				&redis.Z{Member: string(json2), Score: float64(secondAgo.Unix())}},
			wantQueued:    []string{string(json2)},
			wantScheduled: []string{string(json1)},
		},
		{
			tasks: []*redis.Z{
				&redis.Z{Member: string(json1), Score: float64(hourFromNow.Unix())},
				&redis.Z{Member: string(json2), Score: float64(hourFromNow.Unix())}},
			wantQueued:    []string{},
			wantScheduled: []string{string(json1), string(json2)},
		},
	}

	for _, tc := range tests {
		// clean up db before each test case.
		if err := client.FlushDB().Err(); err != nil {
			t.Fatal(err)
		}
		if err := client.ZAdd(scheduled, tc.tasks...).Err(); err != nil {
			t.Error(err)
			continue
		}

		err = r.forward(scheduled)
		if err != nil {
			t.Errorf("(*rdb).forward(%q) = %v, want nil", scheduled, err)
			continue
		}
		gotQueued := client.LRange(defaultQueue, 0, -1).Val()
		if diff := cmp.Diff(tc.wantQueued, gotQueued, sortStrOpt); diff != "" {
			t.Errorf("%q has %d tasks, want %d tasks; (-want, +got)\n%s", defaultQueue, len(gotQueued), len(tc.wantQueued), diff)
			continue
		}
		gotScheduled := client.ZRangeByScore(scheduled, &redis.ZRangeBy{Min: "-inf", Max: "+inf"}).Val()
		if diff := cmp.Diff(tc.wantScheduled, gotScheduled, sortStrOpt); diff != "" {
			t.Errorf("%q has %d tasks, want %d tasks; (-want, +got)\n%s", scheduled, len(gotScheduled), len(tc.wantScheduled), diff)
			continue
		}
	}
}