Add benchmark tests for rdb

commit 4376dc1e9d (parent b7c0c5d3aa)
mirror of https://github.com/hibiken/asynq.git, synced 2024-12-25 07:12:17 +08:00
internal/asynqtest/asynqtest.go

@@ -53,169 +53,169 @@ func NewTaskMessage(taskType string, payload map[string]interface{}) *base.TaskMessage
 // MustMarshal marshals given task message and returns a json string.
 // Calling test will fail if marshaling errors out.
-func MustMarshal(t *testing.T, msg *base.TaskMessage) string {
-	t.Helper()
+func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
+	tb.Helper()
 	data, err := json.Marshal(msg)
 	if err != nil {
-		t.Fatal(err)
+		tb.Fatal(err)
 	}
 	return string(data)
 }

 // MustUnmarshal unmarshals given string into task message struct.
 // Calling test will fail if unmarshaling errors out.
-func MustUnmarshal(t *testing.T, data string) *base.TaskMessage {
-	t.Helper()
+func MustUnmarshal(tb testing.TB, data string) *base.TaskMessage {
+	tb.Helper()
 	var msg base.TaskMessage
 	err := json.Unmarshal([]byte(data), &msg)
 	if err != nil {
-		t.Fatal(err)
+		tb.Fatal(err)
 	}
 	return &msg
 }

 // MustMarshalSlice marshals a slice of task messages and return a slice of
 // json strings. Calling test will fail if marshaling errors out.
-func MustMarshalSlice(t *testing.T, msgs []*base.TaskMessage) []string {
-	t.Helper()
+func MustMarshalSlice(tb testing.TB, msgs []*base.TaskMessage) []string {
+	tb.Helper()
 	var data []string
 	for _, m := range msgs {
-		data = append(data, MustMarshal(t, m))
+		data = append(data, MustMarshal(tb, m))
 	}
 	return data
 }

 // MustUnmarshalSlice unmarshals a slice of strings into a slice of task message structs.
 // Calling test will fail if marshaling errors out.
-func MustUnmarshalSlice(t *testing.T, data []string) []*base.TaskMessage {
-	t.Helper()
+func MustUnmarshalSlice(tb testing.TB, data []string) []*base.TaskMessage {
+	tb.Helper()
 	var msgs []*base.TaskMessage
 	for _, s := range data {
-		msgs = append(msgs, MustUnmarshal(t, s))
+		msgs = append(msgs, MustUnmarshal(tb, s))
 	}
 	return msgs
 }

 // FlushDB deletes all the keys of the currently selected DB.
-func FlushDB(t *testing.T, r *redis.Client) {
-	t.Helper()
+func FlushDB(tb testing.TB, r *redis.Client) {
+	tb.Helper()
 	if err := r.FlushDB().Err(); err != nil {
-		t.Fatal(err)
+		tb.Fatal(err)
 	}
 }

 // SeedDefaultQueue initializes the default queue with the given messages.
-func SeedDefaultQueue(t *testing.T, r *redis.Client, msgs []*base.TaskMessage) {
-	t.Helper()
-	seedRedisList(t, r, base.DefaultQueue, msgs)
+func SeedDefaultQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage) {
+	tb.Helper()
+	seedRedisList(tb, r, base.DefaultQueue, msgs)
 }

 // SeedInProgressQueue initializes the in-progress queue with the given messages.
-func SeedInProgressQueue(t *testing.T, r *redis.Client, msgs []*base.TaskMessage) {
-	t.Helper()
-	seedRedisList(t, r, base.InProgressQueue, msgs)
+func SeedInProgressQueue(tb testing.TB, r *redis.Client, msgs []*base.TaskMessage) {
+	tb.Helper()
+	seedRedisList(tb, r, base.InProgressQueue, msgs)
 }

 // SeedScheduledQueue initializes the scheduled queue with the given messages.
-func SeedScheduledQueue(t *testing.T, r *redis.Client, entries []ZSetEntry) {
-	t.Helper()
-	seedRedisZSet(t, r, base.ScheduledQueue, entries)
+func SeedScheduledQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
+	tb.Helper()
+	seedRedisZSet(tb, r, base.ScheduledQueue, entries)
 }

 // SeedRetryQueue initializes the retry queue with the given messages.
-func SeedRetryQueue(t *testing.T, r *redis.Client, entries []ZSetEntry) {
-	t.Helper()
-	seedRedisZSet(t, r, base.RetryQueue, entries)
+func SeedRetryQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
+	tb.Helper()
+	seedRedisZSet(tb, r, base.RetryQueue, entries)
 }

 // SeedDeadQueue initializes the dead queue with the given messages.
-func SeedDeadQueue(t *testing.T, r *redis.Client, entries []ZSetEntry) {
-	t.Helper()
-	seedRedisZSet(t, r, base.DeadQueue, entries)
+func SeedDeadQueue(tb testing.TB, r *redis.Client, entries []ZSetEntry) {
+	tb.Helper()
+	seedRedisZSet(tb, r, base.DeadQueue, entries)
 }

-func seedRedisList(t *testing.T, c *redis.Client, key string, msgs []*base.TaskMessage) {
-	data := MustMarshalSlice(t, msgs)
+func seedRedisList(tb testing.TB, c *redis.Client, key string, msgs []*base.TaskMessage) {
+	data := MustMarshalSlice(tb, msgs)
 	for _, s := range data {
 		if err := c.LPush(key, s).Err(); err != nil {
-			t.Fatal(err)
+			tb.Fatal(err)
 		}
 	}
 }

-func seedRedisZSet(t *testing.T, c *redis.Client, key string, items []ZSetEntry) {
+func seedRedisZSet(tb testing.TB, c *redis.Client, key string, items []ZSetEntry) {
 	for _, item := range items {
-		z := &redis.Z{Member: MustMarshal(t, item.Msg), Score: float64(item.Score)}
+		z := &redis.Z{Member: MustMarshal(tb, item.Msg), Score: float64(item.Score)}
 		if err := c.ZAdd(key, z).Err(); err != nil {
-			t.Fatal(err)
+			tb.Fatal(err)
 		}
 	}
 }

 // GetEnqueuedMessages returns all task messages in the default queue.
-func GetEnqueuedMessages(t *testing.T, r *redis.Client) []*base.TaskMessage {
-	t.Helper()
-	return getListMessages(t, r, base.DefaultQueue)
+func GetEnqueuedMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
+	tb.Helper()
+	return getListMessages(tb, r, base.DefaultQueue)
 }

 // GetInProgressMessages returns all task messages in the in-progress queue.
-func GetInProgressMessages(t *testing.T, r *redis.Client) []*base.TaskMessage {
-	t.Helper()
-	return getListMessages(t, r, base.InProgressQueue)
+func GetInProgressMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
+	tb.Helper()
+	return getListMessages(tb, r, base.InProgressQueue)
 }

 // GetScheduledMessages returns all task messages in the scheduled queue.
-func GetScheduledMessages(t *testing.T, r *redis.Client) []*base.TaskMessage {
-	t.Helper()
-	return getZSetMessages(t, r, base.ScheduledQueue)
+func GetScheduledMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
+	tb.Helper()
+	return getZSetMessages(tb, r, base.ScheduledQueue)
 }

 // GetRetryMessages returns all task messages in the retry queue.
-func GetRetryMessages(t *testing.T, r *redis.Client) []*base.TaskMessage {
-	t.Helper()
-	return getZSetMessages(t, r, base.RetryQueue)
+func GetRetryMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
+	tb.Helper()
+	return getZSetMessages(tb, r, base.RetryQueue)
 }

 // GetDeadMessages returns all task messages in the dead queue.
-func GetDeadMessages(t *testing.T, r *redis.Client) []*base.TaskMessage {
-	t.Helper()
-	return getZSetMessages(t, r, base.DeadQueue)
+func GetDeadMessages(tb testing.TB, r *redis.Client) []*base.TaskMessage {
+	tb.Helper()
+	return getZSetMessages(tb, r, base.DeadQueue)
 }

 // GetScheduledEntries returns all task messages and its score in the scheduled queue.
-func GetScheduledEntries(t *testing.T, r *redis.Client) []ZSetEntry {
-	t.Helper()
-	return getZSetEntries(t, r, base.ScheduledQueue)
+func GetScheduledEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
+	tb.Helper()
+	return getZSetEntries(tb, r, base.ScheduledQueue)
 }

 // GetRetryEntries returns all task messages and its score in the retry queue.
-func GetRetryEntries(t *testing.T, r *redis.Client) []ZSetEntry {
-	t.Helper()
-	return getZSetEntries(t, r, base.RetryQueue)
+func GetRetryEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
+	tb.Helper()
+	return getZSetEntries(tb, r, base.RetryQueue)
 }

 // GetDeadEntries returns all task messages and its score in the dead queue.
-func GetDeadEntries(t *testing.T, r *redis.Client) []ZSetEntry {
-	t.Helper()
-	return getZSetEntries(t, r, base.DeadQueue)
+func GetDeadEntries(tb testing.TB, r *redis.Client) []ZSetEntry {
+	tb.Helper()
+	return getZSetEntries(tb, r, base.DeadQueue)
 }

-func getListMessages(t *testing.T, r *redis.Client, list string) []*base.TaskMessage {
+func getListMessages(tb testing.TB, r *redis.Client, list string) []*base.TaskMessage {
 	data := r.LRange(list, 0, -1).Val()
-	return MustUnmarshalSlice(t, data)
+	return MustUnmarshalSlice(tb, data)
 }

-func getZSetMessages(t *testing.T, r *redis.Client, zset string) []*base.TaskMessage {
+func getZSetMessages(tb testing.TB, r *redis.Client, zset string) []*base.TaskMessage {
 	data := r.ZRange(zset, 0, -1).Val()
-	return MustUnmarshalSlice(t, data)
+	return MustUnmarshalSlice(tb, data)
 }

-func getZSetEntries(t *testing.T, r *redis.Client, zset string) []ZSetEntry {
+func getZSetEntries(tb testing.TB, r *redis.Client, zset string) []ZSetEntry {
 	data := r.ZRangeWithScores(zset, 0, -1).Val()
 	var entries []ZSetEntry
 	for _, z := range data {
 		entries = append(entries, ZSetEntry{
-			Msg:   MustUnmarshal(t, z.Member.(string)),
+			Msg:   MustUnmarshal(tb, z.Member.(string)),
 			Score: int64(z.Score),
 		})
 	}
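The change above swaps *testing.T for testing.TB in every helper signature. testing.TB is the interface implemented by both *testing.T and *testing.B, so the same helpers become callable from the new benchmarks as well as from tests. A minimal sketch of the pattern (the package, helper name, and body here are illustrative, not part of this commit):

package asynqtest_example // hypothetical package, for illustration only

import (
	"testing"

	"github.com/go-redis/redis/v7"
)

// mustPing is a hypothetical helper: because it takes testing.TB
// instead of *testing.T, it can be called with a *testing.T from a
// test or a *testing.B from a benchmark.
func mustPing(tb testing.TB, r *redis.Client) {
	tb.Helper() // report failures at the caller's line, not here
	if err := r.Ping().Err(); err != nil {
		tb.Fatal(err)
	}
}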
internal/rdb/benchmark_test.go (new file, 37 lines)

@@ -0,0 +1,37 @@
+package rdb
+
+import (
+	"testing"
+
+	"github.com/go-redis/redis/v7"
+	h "github.com/hibiken/asynq/internal/asynqtest"
+	"github.com/hibiken/asynq/internal/base"
+)
+
+func BenchmarkDone(b *testing.B) {
+	r := redis.NewClient(&redis.Options{
+		Addr: "localhost:6379",
+		DB:   8,
+	})
+	h.FlushDB(b, r)
+
+	// populate in-progress queue with messages
+	var inProgress []*base.TaskMessage
+	for i := 0; i < 40; i++ {
+		inProgress = append(inProgress,
+			h.NewTaskMessage("send_email", map[string]interface{}{"subject": "hello", "recipient_id": 123}))
+	}
+	h.SeedInProgressQueue(b, r, inProgress)
+
+	rdb := NewRDB(r)
+
+	b.ResetTimer()
+	for n := 0; n < b.N; n++ {
+		b.StopTimer()
+		msg := h.NewTaskMessage("reindex", map[string]interface{}{"config": "path/to/config/file"})
+		r.LPush(base.InProgressQueue, h.MustMarshal(b, msg))
+		b.StartTimer()
+
+		rdb.Done(msg)
+	}
+}
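To run only this benchmark (it assumes a Redis server on localhost:6379 and flushes DB 8):

go test -run='^$' -bench=BenchmarkDone ./internal/rdb

Note the b.StopTimer/b.StartTimer pair: seeding a fresh message with LPush on each iteration is excluded from the measurement, so only the rdb.Done call is timed.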
internal/rdb/rdb.go

@@ -74,8 +74,8 @@ func (r *RDB) Done(msg *base.TaskMessage) error {
 	}
 	// Note: LREM count ZERO means "remove all elements equal to val"
 	// Note: Script will try removing the message by exact match first,
-	// if the task is muated and exact match is not found, it'll fallback
-	// to linear scan of the list and find a match with ID.
+	// if the task is mutated and exact match is not found, it'll fallback
+	// to finding a match with ID.
 	// KEYS[1] -> asynq:in_progress
 	// KEYS[2] -> asynq:processed:<yyyy-mm-dd>
 	// ARGV[1] -> base.TaskMessage value

@@ -154,8 +154,8 @@ func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
 		return err
 	}
 	// Note: Script will try removing the message by exact match first,
-	// if the task is muated and exact match is not found, it'll fallback
-	// to linear scan of the list and find a match with ID.
+	// if the task is mutated and exact match is not found, it'll fallback
+	// to finding a match with ID.
 	// KEYS[1] -> asynq:in_progress
 	// KEYS[2] -> asynq:retry
 	// KEYS[3] -> asynq:processed:<yyyy-mm-dd>

@@ -222,8 +222,8 @@ func (r *RDB) Kill(msg *base.TaskMessage, errMsg string) error {
 	failureKey := base.FailureKey(now)
 	expireAt := now.Add(statsTTL)
 	// Note: Script will try removing the message by exact match first,
-	// if the task is muated and exact match is not found, it'll fallback
-	// to linear scan of the list and find a match with ID.
+	// if the task is mutated and exact match is not found, it'll fallback
+	// to finding a match with ID.
 	// KEYS[1] -> asynq:in_progress
 	// KEYS[2] -> asynq:dead
 	// KEYS[3] -> asynq:processed:<yyyy-mm-dd>
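All three hunks update the same comment: the scripts remove a message by exact value first and fall back to matching on the task ID when the stored message has been mutated. A rough, non-atomic Go sketch of that strategy (the real implementation is a server-side Lua script executed atomically, and the JSON key for the task ID below is an assumption):

package rdb_example // illustration only; asynq does this atomically in a Lua script

import (
	"encoding/json"

	"github.com/go-redis/redis/v7"
)

// removeFromList tries LREM by exact value first; if no exact match
// exists (the stored message was mutated), it scans the list and
// removes the entry whose task ID matches.
func removeFromList(c *redis.Client, key, raw, id string) error {
	// LREM with count 0 removes all elements equal to raw.
	n, err := c.LRem(key, 0, raw).Result()
	if err != nil || n > 0 {
		return err
	}
	// Fallback: find the entry with a matching ID.
	vals, err := c.LRange(key, 0, -1).Result()
	if err != nil {
		return err
	}
	for _, v := range vals {
		var m struct {
			ID string // assumed JSON key for the task ID
		}
		if json.Unmarshal([]byte(v), &m) == nil && m.ID == id {
			return c.LRem(key, 1, v).Err()
		}
	}
	return nil
}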