Mirror of https://github.com/hibiken/asynq.git
Update heartbeat goroutine to call ExtendLease on active tasks
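The test diff below exercises the new behavior end to end. For orientation, here is a minimal, self-contained sketch of the loop shape being tested: a heartbeat tick that extends the leases of all in-progress tasks so the recoverer does not reclaim tasks that are merely slow. The broker interface, the ExtendLease signature, and the helper names here are assumptions for illustration, not asynq's actual internals.

package main

import (
	"fmt"
	"time"
)

// broker stands in for the subset of asynq's internal broker that this
// sketch needs; the ExtendLease signature is an assumption for illustration.
type broker interface {
	ExtendLease(qname string, ids ...string) error
}

type fakeBroker struct{}

func (fakeBroker) ExtendLease(qname string, ids ...string) error {
	fmt.Printf("extend lease in %q for %v\n", qname, ids)
	return nil
}

// heartbeatLoop sketches the behavior under test: on every tick, extend the
// leases of all tasks this process is still working on.
func heartbeatLoop(b broker, qname string, activeIDs func() []string, interval time.Duration, done <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			if ids := activeIDs(); len(ids) > 0 {
				if err := b.ExtendLease(qname, ids...); err != nil {
					fmt.Printf("could not extend lease: %v\n", err)
				}
			}
		}
	}
}

func main() {
	done := make(chan struct{})
	go heartbeatLoop(fakeBroker{}, "default", func() []string { return []string{"task1", "task2"} }, 10*time.Millisecond, done)
	time.Sleep(35 * time.Millisecond)
	close(done)
}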
@@ -15,21 +15,79 @@ import (
 	"github.com/hibiken/asynq/internal/base"
 	"github.com/hibiken/asynq/internal/rdb"
 	"github.com/hibiken/asynq/internal/testbroker"
+	"github.com/hibiken/asynq/internal/timeutil"
 )
 
+// Test goes through a few phases.
+//
+// Phase1: Simulate Server startup; Simulate starting tasks listed in startedTasks
+// Phase2: Simulate finishing tasks listed in finishedTasks
+// Phase3: Simulate Server shutdown;
 func TestHeartbeater(t *testing.T) {
 	r := setup(t)
 	defer r.Close()
 	rdbClient := rdb.NewRDB(r)
 
+	now := time.Now()
+	const elapsedTime = 42 * time.Second // simulated time elapsed between phase1 and phase2
+
+	t1 := h.NewTaskMessageWithQueue("task1", nil, "default")
+	t2 := h.NewTaskMessageWithQueue("task2", nil, "default")
+	t3 := h.NewTaskMessageWithQueue("task3", nil, "default")
+
 	tests := []struct {
-		interval    time.Duration
+		// Interval between heartbeats.
+		interval time.Duration
+
+		// Server info.
 		host        string
 		pid         int
 		queues      map[string]int
 		concurrency int
+
+		active        map[string][]*base.TaskMessage // initial active set state
+		lease         map[string][]base.Z            // initial lease set state
+		wantLease1    map[string][]base.Z            // expected lease set state after starting all startedTasks
+		wantLease2    map[string][]base.Z            // expected lease set state after finishing all finishedTasks
+		startedTasks  []*base.TaskMessage            // tasks to send via the started channel
+		finishedTasks []*base.TaskMessage            // tasks to send via the finished channel
+
+		startTime   time.Time     // simulated start time
+		elapsedTime time.Duration // simulated time elapsed between starting and finishing processing tasks
 	}{
-		{2 * time.Second, "localhost", 45678, map[string]int{"default": 1}, 10},
+		{
+			interval:    2 * time.Second,
+			host:        "localhost",
+			pid:         45678,
+			queues:      map[string]int{"default": 1}, // TODO: Test with multiple queues
+			concurrency: 10,
+			active: map[string][]*base.TaskMessage{
+				"default": {t1, t2, t3},
+			},
+			lease: map[string][]base.Z{
+				"default": {
+					{Message: t1, Score: now.Add(10 * time.Second).Unix()},
+					{Message: t2, Score: now.Add(10 * time.Second).Unix()},
+					{Message: t3, Score: now.Add(10 * time.Second).Unix()},
+				},
+			},
+			startedTasks:  []*base.TaskMessage{t1, t2, t3},
+			finishedTasks: []*base.TaskMessage{t1, t2},
+			wantLease1: map[string][]base.Z{
+				"default": {
+					{Message: t1, Score: now.Add(rdb.LeaseDuration).Unix()},
+					{Message: t2, Score: now.Add(rdb.LeaseDuration).Unix()},
+					{Message: t3, Score: now.Add(rdb.LeaseDuration).Unix()},
+				},
+			},
+			wantLease2: map[string][]base.Z{
+				"default": {
+					{Message: t3, Score: now.Add(elapsedTime).Add(rdb.LeaseDuration).Unix()},
+				},
+			},
+			startTime:   now,
+			elapsedTime: elapsedTime,
+		},
 	}
 
 	timeCmpOpt := cmpopts.EquateApproxTime(10 * time.Millisecond)
@@ -37,8 +95,15 @@ func TestHeartbeater(t *testing.T) {
 	ignoreFieldOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID")
 	for _, tc := range tests {
 		h.FlushDB(t, r)
+		h.SeedAllActiveQueues(t, r, tc.active)
+		h.SeedAllLease(t, r, tc.lease)
+
+		clock := timeutil.NewSimulatedClock(tc.startTime)
+		rdbClient.SetClock(clock)
 
 		srvState := &serverState{}
+		startingCh := make(chan *workerInfo)
+		finishedCh := make(chan *base.TaskMessage)
 		hb := newHeartbeater(heartbeaterParams{
 			logger: testLogger,
 			broker: rdbClient,
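The SetClock call above is what lets the test advance time without sleeping through real lease windows: lease scores are computed from the clock, so a simulated clock makes them deterministic. A hedged re-sketch of that pattern (the real type is timeutil.SimulatedClock; this standalone version only mirrors the two methods the test uses):

package main

import (
	"fmt"
	"sync"
	"time"
)

// simulatedClock re-sketches the idea behind timeutil.SimulatedClock: a
// clock whose Now() the test controls explicitly.
type simulatedClock struct {
	mu sync.Mutex
	t  time.Time
}

func newSimulatedClock(t time.Time) *simulatedClock { return &simulatedClock{t: t} }

func (c *simulatedClock) Now() time.Time {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.t
}

func (c *simulatedClock) AdvanceTime(d time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.t = c.t.Add(d)
}

func main() {
	clock := newSimulatedClock(time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC))
	fmt.Println(clock.Now()) // 2022-01-01 00:00:00 +0000 UTC
	clock.AdvanceTime(42 * time.Second)
	fmt.Println(clock.Now()) // 2022-01-01 00:00:42 +0000 UTC
}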
@@ -47,14 +112,18 @@ func TestHeartbeater(t *testing.T) {
 			queues:         tc.queues,
 			strictPriority: false,
 			state:          srvState,
-			starting:       make(chan *workerInfo),
-			finished:       make(chan *base.TaskMessage),
+			starting:       startingCh,
+			finished:       finishedCh,
 		})
 
 		// Change host and pid fields for testing purposes.
 		hb.host = tc.host
 		hb.pid = tc.pid
 
+		//===================
+		// Start Phase1
+		//===================
+
 		srvState.mu.Lock()
 		srvState.value = srvStateActive // simulating Server.Start
 		srvState.mu.Unlock()
@@ -62,17 +131,17 @@ func TestHeartbeater(t *testing.T) {
 		var wg sync.WaitGroup
 		hb.start(&wg)
 
-		want := &base.ServerInfo{
-			Host:        tc.host,
-			PID:         tc.pid,
-			Queues:      tc.queues,
-			Concurrency: tc.concurrency,
-			Started:     time.Now(),
-			Status:      "active",
+		// Simulate processor starting to work on tasks.
+		for _, msg := range tc.startedTasks {
+			startingCh <- &workerInfo{
+				msg:      msg,
+				started:  now,
+				deadline: now.Add(30 * time.Minute),
+			}
 		}
 
-		// allow for heartbeater to write to redis
-		time.Sleep(tc.interval)
+		// Wait for heartbeater to write to redis
+		time.Sleep(tc.interval * 2)
 
 		ss, err := rdbClient.ListServers()
 		if err != nil {
@@ -82,41 +151,91 @@ func TestHeartbeater(t *testing.T) {
 		}
 
 		if len(ss) != 1 {
-			t.Errorf("(*RDB).ListServers returned %d process info, want 1", len(ss))
+			t.Errorf("(*RDB).ListServers returned %d server info, want 1", len(ss))
 			hb.shutdown()
 			continue
 		}
 
-		if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
-			t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
+		wantInfo := &base.ServerInfo{
+			Host:              tc.host,
+			PID:               tc.pid,
+			Queues:            tc.queues,
+			Concurrency:       tc.concurrency,
+			Started:           now,
+			Status:            "active",
+			ActiveWorkerCount: len(tc.startedTasks),
+		}
+		if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
+			t.Errorf("redis stored server status %+v, want %+v; (-want, +got)\n%s", ss[0], wantInfo, diff)
 			hb.shutdown()
 			continue
 		}
 
-		// server state change; simulating Server.Shutdown
+		for qname, wantLease := range tc.wantLease1 {
+			gotLease := h.GetLeaseEntries(t, r, qname)
+			if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
+				t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.LeaseKey(qname), diff)
+			}
+		}
+
+		//===================
+		// Start Phase2
+		//===================
+
+		clock.AdvanceTime(tc.elapsedTime)
+		// Simulate processor finished processing tasks.
+		for _, msg := range tc.finishedTasks {
+			if err := rdbClient.Done(msg); err != nil {
+				t.Fatalf("RDB.Done failed: %v", err)
+			}
+			finishedCh <- msg
+		}
+		// Wait for heartbeater to write to redis
+		time.Sleep(tc.interval * 2)
+
+		for qname, wantLease := range tc.wantLease2 {
+			gotLease := h.GetLeaseEntries(t, r, qname)
+			if diff := cmp.Diff(wantLease, gotLease, h.SortZSetEntryOpt); diff != "" {
+				t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.LeaseKey(qname), diff)
+			}
+		}
+
+		//===================
+		// Start Phase3
+		//===================
+
+		// Server state change; simulating Server.Shutdown
 		srvState.mu.Lock()
 		srvState.value = srvStateClosed
 		srvState.mu.Unlock()
 
-		// allow for heartbeater to write to redis
+		// Wait for heartbeater to write to redis
 		time.Sleep(tc.interval * 2)
 
-		want.Status = "closed"
+		wantInfo = &base.ServerInfo{
+			Host:              tc.host,
+			PID:               tc.pid,
+			Queues:            tc.queues,
+			Concurrency:       tc.concurrency,
+			Started:           now,
+			Status:            "closed",
+			ActiveWorkerCount: len(tc.startedTasks) - len(tc.finishedTasks),
+		}
 		ss, err = rdbClient.ListServers()
 		if err != nil {
-			t.Errorf("could not read process status from redis: %v", err)
+			t.Errorf("could not read server status from redis: %v", err)
 			hb.shutdown()
 			continue
 		}
 
 		if len(ss) != 1 {
-			t.Errorf("(*RDB).ListProcesses returned %d process info, want 1", len(ss))
+			t.Errorf("(*RDB).ListServers returned %d server info, want 1", len(ss))
 			hb.shutdown()
 			continue
 		}
 
-		if diff := cmp.Diff(want, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
-			t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], want, diff)
+		if diff := cmp.Diff(wantInfo, ss[0], timeCmpOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
+			t.Errorf("redis stored process status %+v, want %+v; (-want, +got)\n%s", ss[0], wantInfo, diff)
 			hb.shutdown()
 			continue
 		}
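For reference, the lease expectations in the test follow directly from the simulated clock: the seeded leases expire at now+10s; the first heartbeat extends all three started tasks to now+rdb.LeaseDuration (wantLease1); after the clock advances 42s and t1/t2 finish, the next beat extends only t3 to now+42s+rdb.LeaseDuration (wantLease2). A small sketch of that arithmetic, assuming a 30-second lease duration (the exact rdb.LeaseDuration value is an assumption of this sketch):

package main

import (
	"fmt"
	"time"
)

// leaseDuration stands in for rdb.LeaseDuration; the 30s value is an
// assumption for illustration, not read from the asynq source.
const leaseDuration = 30 * time.Second

func main() {
	now := time.Now()
	elapsed := 42 * time.Second

	// Phase1: the first heartbeat extends every started task's lease from
	// its seeded score (now+10s) to now+leaseDuration.
	fmt.Println("wantLease1 score:", now.Add(leaseDuration).Unix())

	// Phase2: the clock advances 42s, t1 and t2 finish, and the next beat
	// extends only t3 relative to the advanced clock.
	fmt.Println("wantLease2 score:", now.Add(elapsed).Add(leaseDuration).Unix())
}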