Add Scheduler
- Renamed the component previously called scheduler to forwarder to resolve a name conflict
@@ -758,7 +758,7 @@ return keys`)

 // ListServers returns the list of server info.
 func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
-	now := time.Now().UTC()
+	now := time.Now()
 	res, err := listServerKeysCmd.Run(r.client, []string{base.AllServers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err

@@ -791,7 +791,7 @@ return keys`)

 // ListWorkers returns the list of worker stats.
 func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
-	now := time.Now().UTC()
+	now := time.Now()
 	res, err := listWorkerKeysCmd.Run(r.client, []string{base.AllWorkers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err

@@ -818,6 +818,63 @@ func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
	return workers, nil
}

// Note: Script also removes stale keys.
var listSchedulerKeysCmd = redis.NewScript(`
local now = tonumber(ARGV[1])
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
return keys`)

// ListSchedulerEntries returns the list of scheduler entries.
func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
	now := time.Now()
	res, err := listSchedulerKeysCmd.Run(r.client, []string{base.AllSchedulers}, now.Unix()).Result()
	if err != nil {
		return nil, err
	}
	keys, err := cast.ToStringSliceE(res)
	if err != nil {
		return nil, err
	}
	var entries []*base.SchedulerEntry
	for _, key := range keys {
		data, err := r.client.LRange(key, 0, -1).Result()
		if err != nil {
			continue // skip bad data
		}
		for _, s := range data {
			var e base.SchedulerEntry
			if err := json.Unmarshal([]byte(s), &e); err != nil {
				continue // skip bad data
			}
			entries = append(entries, &e)
		}
	}
	return entries, nil
}
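
To make the read path concrete, here is a minimal sketch of a monitoring-style caller. It is illustrative only: rdb.NewRDB and the go-redis v7 client match the APIs used elsewhere in this diff, but internal/... packages are importable only from within the asynq module, and the address is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/go-redis/redis/v7"
	"github.com/hibiken/asynq/internal/rdb"
)

func main() {
	// Illustrative only: list the entries registered by running schedulers.
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	r := rdb.NewRDB(client)
	entries, err := r.ListSchedulerEntries()
	if err != nil {
		log.Fatalf("ListSchedulerEntries failed: %v", err)
	}
	for _, e := range entries {
		fmt.Printf("spec=%q type=%q next=%v prev=%v\n", e.Spec, e.Type, e.Next, e.Prev)
	}
}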

// ListSchedulerEnqueueEvents returns the list of scheduler enqueue events.
func (r *RDB) ListSchedulerEnqueueEvents(entryID string) ([]*base.SchedulerEnqueueEvent, error) {
	key := base.SchedulerHistoryKey(entryID)
	zs, err := r.client.ZRangeWithScores(key, 0, -1).Result()
	if err != nil {
		return nil, err
	}
	var events []*base.SchedulerEnqueueEvent
	for _, z := range zs {
		data, err := cast.ToStringE(z.Member)
		if err != nil {
			return nil, err
		}
		var e base.SchedulerEnqueueEvent
		if err := json.Unmarshal([]byte(data), &e); err != nil {
			return nil, err
		}
		events = append(events, &e)
	}
	return events, nil
}
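
The enqueue-history read side follows the same shape. A hedged snippet continuing the sketch above (the entry ID "entry123" is borrowed from the tests below; the TaskID field name is inferred from the tests' positional struct literals):

// Illustrative only: print the enqueue history for one scheduler entry.
events, err := r.ListSchedulerEnqueueEvents("entry123")
if err != nil {
	log.Fatalf("ListSchedulerEnqueueEvents failed: %v", err)
}
for _, ev := range events {
	fmt.Printf("task=%s enqueued_at=%v\n", ev.TaskID, ev.EnqueuedAt)
}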

// Pause pauses processing of tasks from the given queue.
func (r *RDB) Pause(qname string) error {
	key := base.PausedKey(qname)

@@ -2983,6 +2983,103 @@ func TestListWorkers(t *testing.T) {
	}
}

func TestWriteListClearSchedulerEntries(t *testing.T) {
	r := setup(t)
	now := time.Now().UTC()
	schedulerID := "127.0.0.1:9876:abc123"

	data := []*base.SchedulerEntry{
		&base.SchedulerEntry{
			Spec:    "* * * * *",
			Type:    "foo",
			Payload: nil,
			Opts:    "",
			Next:    now.Add(5 * time.Hour),
			Prev:    now.Add(-2 * time.Hour),
		},
		&base.SchedulerEntry{
			Spec:    "@every 20m",
			Type:    "bar",
			Payload: map[string]interface{}{"fiz": "baz"},
			Opts:    "",
			Next:    now.Add(1 * time.Minute),
			Prev:    now.Add(-19 * time.Minute),
		},
	}

	if err := r.WriteSchedulerEntries(schedulerID, data, 30*time.Second); err != nil {
		t.Fatalf("WriteSchedulerEntries failed: %v", err)
	}
	entries, err := r.ListSchedulerEntries()
	if err != nil {
		t.Fatalf("ListSchedulerEntries failed: %v", err)
	}
	if diff := cmp.Diff(data, entries, h.SortSchedulerEntryOpt); diff != "" {
		t.Errorf("ListSchedulerEntries() = %v, want %v; (-want,+got)\n%s", entries, data, diff)
	}
	if err := r.ClearSchedulerEntries(schedulerID); err != nil {
		t.Fatalf("ClearSchedulerEntries failed: %v", err)
	}
	entries, err = r.ListSchedulerEntries()
	if err != nil {
		t.Fatalf("ListSchedulerEntries() after clear failed: %v", err)
	}
	if len(entries) != 0 {
		t.Errorf("found %d entries, want 0 after clearing", len(entries))
	}
}

func TestSchedulerEnqueueEvents(t *testing.T) {
	r := setup(t)

	var (
		now        = time.Now()
		oneDayAgo  = now.Add(-24 * time.Hour)
		oneHourAgo = now.Add(-1 * time.Hour)
	)

	type event struct {
		entryID    string
		taskID     string
		enqueuedAt time.Time
	}

	tests := []struct {
		entryID string
		events  []*base.SchedulerEnqueueEvent
	}{
		{
			entryID: "entry123",
			events:  []*base.SchedulerEnqueueEvent{{"task123", oneDayAgo}, {"task456", oneHourAgo}},
		},
		{
			entryID: "entry123",
			events:  []*base.SchedulerEnqueueEvent{},
		},
	}

loop:
	for _, tc := range tests {
		h.FlushDB(t, r.client)

		for _, e := range tc.events {
			if err := r.RecordSchedulerEnqueueEvent(tc.entryID, e); err != nil {
				t.Errorf("RecordSchedulerEnqueueEvent(%q, %v) failed: %v", tc.entryID, e, err)
				continue loop
			}
		}
		got, err := r.ListSchedulerEnqueueEvents(tc.entryID)
		if err != nil {
			t.Errorf("ListSchedulerEnqueueEvents(%q) failed: %v", tc.entryID, err)
			continue
		}
		if diff := cmp.Diff(tc.events, got, h.SortSchedulerEnqueueEventOpt, timeCmpOpt); diff != "" {
			t.Errorf("ListSchedulerEnqueueEvents(%q) = %v, want %v; (-want,+got)\n%s",
				tc.entryID, got, tc.events, diff)
		}
	}
}

func TestPause(t *testing.T) {
	r := setup(t)

@@ -575,6 +575,45 @@ func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
	return clearServerStateCmd.Run(r.client, []string{skey, wkey}).Err()
}

// KEYS[1] -> asynq:schedulers:{<schedulerID>}
// ARGV[1] -> TTL in seconds
// ARGV[2:] -> scheduler entries
var writeSchedulerEntriesCmd = redis.NewScript(`
redis.call("DEL", KEYS[1])
for i = 2, #ARGV do
	redis.call("LPUSH", KEYS[1], ARGV[i])
end
redis.call("EXPIRE", KEYS[1], ARGV[1])
return redis.status_reply("OK")`)

// WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl.
func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
	args := []interface{}{ttl.Seconds()}
	for _, e := range entries {
		bytes, err := json.Marshal(e)
		if err != nil {
			continue // skip bad data
		}
		args = append(args, bytes)
	}
	exp := time.Now().Add(ttl).UTC()
	key := base.SchedulerEntriesKey(schedulerID)
	err := r.client.ZAdd(base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
	if err != nil {
		return err
	}
	return writeSchedulerEntriesCmd.Run(r.client, []string{key}, args...).Err()
}
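
The ttl parameter suggests a heartbeat-style call pattern: a scheduler process re-writes its entries on an interval shorter than the TTL, so its asynq:schedulers:{<schedulerID>} key stays alive while the process runs and simply ages out of asynq:schedulers when it dies. A minimal sketch under that assumption (the helper name, interval, and 30-second TTL are illustrative, not from this commit; assumes imports of log and time plus the internal rdb and base packages):

// heartbeat is an illustrative helper, not part of this commit.
func heartbeat(r *rdb.RDB, schedulerID string, entries []*base.SchedulerEntry, done <-chan struct{}) {
	const ttl = 30 * time.Second // hypothetical TTL; mirrors the value used in the test
	ticker := time.NewTicker(ttl / 3)
	defer ticker.Stop()
	for {
		// Each write refreshes both the zset score and the key's TTL.
		if err := r.WriteSchedulerEntries(schedulerID, entries, ttl); err != nil {
			log.Printf("could not write scheduler entries: %v", err)
		}
		select {
		case <-done:
			return
		case <-ticker.C:
		}
	}
}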

// ClearSchedulerEntries deletes scheduler entries data from redis.
func (r *RDB) ClearSchedulerEntries(schedulerID string) error {
	key := base.SchedulerEntriesKey(schedulerID)
	if err := r.client.ZRem(base.AllSchedulers, key).Err(); err != nil {
		return err
	}
	return r.client.Del(key).Err()
}

// CancelationPubSub returns a pubsub for cancelation messages.
func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
	pubsub := r.client.Subscribe(base.CancelChannel)

@@ -590,3 +629,26 @@ func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
func (r *RDB) PublishCancelation(id string) error {
	return r.client.Publish(base.CancelChannel, id).Err()
}

// KEYS[1] -> asynq:scheduler_history:<entryID>
// ARGV[1] -> enqueued_at timestamp
// ARGV[2] -> serialized SchedulerEnqueueEvent data
// ARGV[3] -> max number of events to be persisted
var recordSchedulerEnqueueEventCmd = redis.NewScript(`
redis.call("ZADD", KEYS[1], ARGV[1], ARGV[2])
-- Trim by rank so that only the newest ARGV[3] events are kept.
redis.call("ZREMRANGEBYRANK", KEYS[1], 0, -tonumber(ARGV[3])-1)
return redis.status_reply("OK")`)
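
For intuition on the rank arithmetic: with N = maxEvents, ZREMRANGEBYRANK key 0 -(N+1) deletes everything except the N highest-scored (newest) members, and when the set holds fewer than N events the negative stop index resolves before the start, so the call is a no-op. The same trim expressed directly against the go-redis v7 client (illustrative, not part of this commit):

// Illustrative only: trim a history zset to its newest maxEvents members.
func trimHistory(c *redis.Client, key string, maxEvents int64) error {
	return c.ZRemRangeByRank(key, 0, -maxEvents-1).Err()
}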

// Maximum number of enqueue events to store per entry.
const maxEvents = 10000

// RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
	key := base.SchedulerHistoryKey(entryID)
	data, err := json.Marshal(event)
	if err != nil {
		return err
	}
	return recordSchedulerEnqueueEventCmd.Run(
		r.client, []string{key}, event.EnqueuedAt.Unix(), data, maxEvents).Err()
}
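
On the write side, the scheduler presumably calls RecordSchedulerEnqueueEvent right after enqueueing a task for an entry. A minimal sketch with hypothetical IDs, continuing the illustrative setup above:

// Illustrative only: record that a task was just enqueued for "entry123".
ev := &base.SchedulerEnqueueEvent{
	TaskID:     "task123", // hypothetical task ID; field name inferred from the tests
	EnqueuedAt: time.Now(),
}
if err := r.RecordSchedulerEnqueueEvent("entry123", ev); err != nil {
	log.Printf("could not record enqueue event: %v", err)
}

Keying the history zset by enqueue timestamp keeps reads in chronological order and makes the size cap a single rank-based trim inside the script.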