Mirror of https://github.com/hibiken/asynq.git (synced 2024-12-24 23:02:18 +08:00)
Add end-to-end benchmark tests
parent 4376dc1e9d
commit 09ee8df5a0
@@ -12,14 +12,14 @@ import (
 // This file defines test helper functions used by
 // other test files.
 
-func setup(t *testing.T) *redis.Client {
-	t.Helper()
+func setup(tb testing.TB) *redis.Client {
+	tb.Helper()
 	r := redis.NewClient(&redis.Options{
 		Addr: "localhost:6379",
 		DB:   14,
 	})
 	// Start each test with a clean slate.
-	h.FlushDB(t, r)
+	h.FlushDB(tb, r)
 	return r
 }
 
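Note: the setup helper now takes testing.TB, the interface satisfied by both *testing.T and *testing.B, so the same Redis setup can be reused from tests and from the new benchmarks below. A minimal sketch of what the change enables; the test and benchmark names here are illustrative only, not part of the commit:

// Both *testing.T and *testing.B satisfy testing.TB, so the shared
// helper can be called from either kind of test function.
func TestExample(t *testing.T) {
	r := setup(t) // *testing.T is a testing.TB
	defer r.Close()
}

func BenchmarkExample(b *testing.B) {
	r := setup(b) // *testing.B is a testing.TB
	defer r.Close()
}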
benchmark_test.go (new file, 91 lines)
@@ -0,0 +1,91 @@
+package asynq
+
+import (
+	"fmt"
+	"math/rand"
+	"sync"
+	"testing"
+	"time"
+)
+
+// Simple E2E Benchmark testing with no scheduled tasks and
+// no retries.
+func BenchmarkEndToEndSimple(b *testing.B) {
+	const count = 100000
+	for n := 0; n < b.N; n++ {
+		b.StopTimer() // begin setup
+		r := setup(b)
+		client := NewClient(r)
+		bg := NewBackground(r, &Config{
+			Concurrency: 10,
+			RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
+				return time.Second
+			},
+		})
+		// Create a bunch of tasks
+		for i := 0; i < count; i++ {
+			t := Task{Type: fmt.Sprintf("task%d", i), Payload: Payload{"data": i}}
+			client.Process(&t, time.Now())
+		}
+
+		var wg sync.WaitGroup
+		wg.Add(count)
+		handler := func(t *Task) error {
+			wg.Done()
+			return nil
+		}
+		b.StartTimer() // end setup
+
+		bg.start(HandlerFunc(handler))
+		wg.Wait()
+
+		b.StopTimer() // begin teardown
+		bg.stop()
+		b.StartTimer() // end teardown
+	}
+}
+
+// E2E benchmark with scheduled tasks and retries.
+func BenchmarkEndToEnd(b *testing.B) {
+	const count = 100000
+	for n := 0; n < b.N; n++ {
+		b.StopTimer() // begin setup
+		rand.Seed(time.Now().UnixNano())
+		r := setup(b)
+		client := NewClient(r)
+		bg := NewBackground(r, &Config{
+			Concurrency: 10,
+			RetryDelayFunc: func(n int, err error, t *Task) time.Duration {
+				return time.Second
+			},
+		})
+		// Create a bunch of tasks
+		for i := 0; i < count; i++ {
+			t := Task{Type: fmt.Sprintf("task%d", i), Payload: Payload{"data": i}}
+			client.Process(&t, time.Now())
+		}
+		for i := 0; i < count; i++ {
+			t := Task{Type: fmt.Sprintf("scheduled%d", i), Payload: Payload{"data": i}}
+			client.Process(&t, time.Now().Add(time.Second))
+		}
+
+		var wg sync.WaitGroup
+		wg.Add(count * 2)
+		handler := func(t *Task) error {
+			// randomly fail 1% of tasks
+			if rand.Intn(100) == 1 {
+				return fmt.Errorf(":(")
+			}
+			wg.Done()
+			return nil
+		}
+		b.StartTimer() // end setup
+
+		bg.start(HandlerFunc(handler))
+		wg.Wait()
+
+		b.StopTimer() // begin teardown
+		bg.stop()
+		b.StartTimer() // end teardown
+	}
+}
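Note: both benchmarks share one shape: the timer is stopped while tasks are enqueued and the Background instance is built, so only the processing phase is measured, and a sync.WaitGroup decremented from the handler makes wg.Wait() block until every task has been handled. In BenchmarkEndToEnd the handler returns an error for roughly 1% of tasks without calling wg.Done, so those tasks go through the retry path (delayed one second by RetryDelayFunc) and the measured phase ends only after every task eventually succeeds. A self-contained sketch of the same StopTimer/StartTimer plus WaitGroup pattern, using a hypothetical in-memory worker pool instead of asynq and Redis:

package example

import (
	"sync"
	"testing"
)

// BenchmarkWorkerPool mirrors the structure above: setup is excluded
// from the measurement and wg.Wait blocks until every job is processed.
func BenchmarkWorkerPool(b *testing.B) {
	const count = 100000
	for n := 0; n < b.N; n++ {
		b.StopTimer() // begin setup: fill the queue before measuring
		jobs := make(chan int, count)
		for i := 0; i < count; i++ {
			jobs <- i
		}
		close(jobs)

		var wg sync.WaitGroup
		wg.Add(count)
		b.StartTimer() // end setup

		// Start 10 workers and measure how long draining the queue takes.
		for w := 0; w < 10; w++ {
			go func() {
				for range jobs {
					wg.Done()
				}
			}()
		}
		wg.Wait()
	}
}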