mirror of https://github.com/hibiken/asynq.git
Add Scheduler
- Renamed the component previously called "scheduler" to "forwarder" to resolve a name conflict
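For context, a minimal sketch of how a periodic task might be registered with the Scheduler this commit introduces, assuming a public API along the lines of NewScheduler/Register/Run (those names, the SchedulerOpts parameter, and the cron spec below are illustrative, not confirmed by this diff):

package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	// Hypothetical usage; NewScheduler and Register are assumed entry points.
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		nil, // default scheduler options
	)

	// Enqueue an "email:digest" task every 30 minutes.
	// At this revision task payloads are map[string]interface{} (see SchedulerEntry.Payload below).
	task := asynq.NewTask("email:digest", map[string]interface{}{"user_id": 42})
	entryID, err := scheduler.Register("*/30 * * * *", task)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("registered entry: %s", entryID)

	// Blocks, enqueuing a task whenever an entry's cron spec fires.
	if err := scheduler.Run(); err != nil {
		log.Fatal(err)
	}
}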
@@ -65,6 +65,24 @@ var SortWorkerInfoOpt = cmp.Transformer("SortWorkerInfo", func(in []*base.Worker
 	return out
 })

+// SortSchedulerEntryOpt is a cmp.Option to sort base.SchedulerEntry for comparing slice of entries.
+var SortSchedulerEntryOpt = cmp.Transformer("SortSchedulerEntry", func(in []*base.SchedulerEntry) []*base.SchedulerEntry {
+	out := append([]*base.SchedulerEntry(nil), in...) // Copy input to avoid mutating it
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].Spec < out[j].Spec
+	})
+	return out
+})
+
+// SortSchedulerEnqueueEventOpt is a cmp.Option to sort base.SchedulerEnqueueEvent for comparing slice of events.
+var SortSchedulerEnqueueEventOpt = cmp.Transformer("SortSchedulerEnqueueEvent", func(in []*base.SchedulerEnqueueEvent) []*base.SchedulerEnqueueEvent {
+	out := append([]*base.SchedulerEnqueueEvent(nil), in...)
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].EnqueuedAt.Unix() < out[j].EnqueuedAt.Unix()
+	})
+	return out
+})
+
 // SortStringSliceOpt is a cmp.Option to sort string slice.
 var SortStringSliceOpt = cmp.Transformer("SortStringSlice", func(in []string) []string {
 	out := append([]string(nil), in...)
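These transformer options let tests compare slices of entries and events regardless of order: cmp sorts both sides through the transformer before diffing. A self-contained sketch of the same pattern, using a stand-in entry type rather than the asynq ones:

package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

type entry struct{ Spec string }

// sortEntryOpt sorts []entry by Spec before cmp compares them,
// so ordering differences are ignored.
var sortEntryOpt = cmp.Transformer("SortEntry", func(in []entry) []entry {
	out := append([]entry(nil), in...) // copy to avoid mutating the input
	sort.Slice(out, func(i, j int) bool { return out[i].Spec < out[j].Spec })
	return out
})

func main() {
	a := []entry{{"@every 20m"}, {"* * * * *"}}
	b := []entry{{"* * * * *"}, {"@every 20m"}}
	fmt.Println(cmp.Diff(a, b, sortEntryOpt) == "") // true: equal up to order
}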
@@ -29,10 +29,11 @@ var DefaultQueue = QueueKey(DefaultQueueName)

 // Global Redis keys.
 const (
-	AllServers    = "asynq:servers" // ZSET
-	AllWorkers    = "asynq:workers" // ZSET
-	AllQueues     = "asynq:queues"  // SET
-	CancelChannel = "asynq:cancel"  // PubSub channel
+	AllServers    = "asynq:servers"    // ZSET
+	AllWorkers    = "asynq:workers"    // ZSET
+	AllSchedulers = "asynq:schedulers" // ZSET
+	AllQueues     = "asynq:queues"     // SET
+	CancelChannel = "asynq:cancel"     // PubSub channel
 )

 // QueueKey returns a redis key for the given queue name.
@@ -81,13 +82,23 @@ func FailedKey(qname string, t time.Time) string {
 }

 // ServerInfoKey returns a redis key for process info.
-func ServerInfoKey(hostname string, pid int, sid string) string {
-	return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, sid)
+func ServerInfoKey(hostname string, pid int, serverID string) string {
+	return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, serverID)
 }

 // WorkersKey returns a redis key for the workers given hostname, pid, and server ID.
-func WorkersKey(hostname string, pid int, sid string) string {
-	return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, sid)
+func WorkersKey(hostname string, pid int, serverID string) string {
+	return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, serverID)
 }

+// SchedulerEntriesKey returns a redis key for the scheduler entries given scheduler ID.
+func SchedulerEntriesKey(schedulerID string) string {
+	return fmt.Sprintf("asynq:schedulers:{%s}", schedulerID)
+}
+
+// SchedulerHistoryKey returns a redis key for the scheduler's history for the given entry.
+func SchedulerHistoryKey(entryID string) string {
+	return fmt.Sprintf("asynq:scheduler_history:%s", entryID)
+}
+
 // UniqueKey returns a redis key with the given type, payload, and queue name.
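The braces in keys like asynq:servers:{<hostname>:<pid>:<serverID>} are Redis Cluster hash tags: only the text inside {...} determines the hash slot, so a process's related keys land on the same node. The new scheduler keys follow the same scheme: one LIST of serialized entries per scheduler process, and one history ZSET per registered entry. A small sketch with local copies of the helpers, illustrative only:

package main

import "fmt"

// Local copies of the key helpers above, for illustration only.
func schedulerEntriesKey(schedulerID string) string {
	return fmt.Sprintf("asynq:schedulers:{%s}", schedulerID)
}

func schedulerHistoryKey(entryID string) string {
	return fmt.Sprintf("asynq:scheduler_history:%s", entryID)
}

func main() {
	// One LIST of serialized entries per scheduler process...
	fmt.Println(schedulerEntriesKey("localhost:9876:abc123")) // asynq:schedulers:{localhost:9876:abc123}
	// ...and one ZSET of enqueue events per registered entry.
	fmt.Println(schedulerHistoryKey("entry123")) // asynq:scheduler_history:entry123
}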
@@ -208,10 +219,10 @@ const (
 	// StatusIdle indicates the server is in idle state.
 	StatusIdle ServerStatusValue = iota

-	// StatusRunning indicates the servier is up and processing tasks.
+	// StatusRunning indicates the server is up and active.
 	StatusRunning

-	// StatusQuiet indicates the server is up but not processing new tasks.
+	// StatusQuiet indicates the server is up but not active.
 	StatusQuiet

 	// StatusStopped indicates the server has been stopped.
@@ -273,6 +284,40 @@ type WorkerInfo struct {
 	Started time.Time
 }

+// SchedulerEntry holds information about a periodic task registered with a scheduler.
+type SchedulerEntry struct {
+	// Identifier of this entry.
+	ID string
+
+	// Spec describes the schedule of this entry.
+	Spec string
+
+	// Type is the task type of the periodic task.
+	Type string
+
+	// Payload is the payload of the periodic task.
+	Payload map[string]interface{}
+
+	// Opts is the options for the periodic task.
+	Opts string
+
+	// Next shows the next time the task will be enqueued.
+	Next time.Time
+
+	// Prev shows the last time the task was enqueued.
+	// Zero time if task was never enqueued.
+	Prev time.Time
+}
+
+// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
+type SchedulerEnqueueEvent struct {
+	// ID of the task that was enqueued.
+	TaskID string
+
+	// Time the task was enqueued.
+	EnqueuedAt time.Time
+}
+
 // Cancelations is a collection that holds cancel functions for all active tasks.
 //
 // Cancelations are safe for concurrent use by multiple goroutines.
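These records are persisted as JSON by the RDB layer (see WriteSchedulerEntries and ListSchedulerEntries further down), so both structs must round-trip through encoding/json. A quick sketch using a local copy of the struct:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local copy of base.SchedulerEntry, for illustration.
type SchedulerEntry struct {
	ID      string
	Spec    string
	Type    string
	Payload map[string]interface{}
	Opts    string
	Next    time.Time
	Prev    time.Time
}

func main() {
	e := SchedulerEntry{
		Spec:    "@every 20m",
		Type:    "bar",
		Payload: map[string]interface{}{"fiz": "baz"},
		Next:    time.Now().Add(20 * time.Minute),
		// Prev left as the zero time: the entry has never fired.
	}
	data, _ := json.Marshal(e)

	var got SchedulerEntry
	_ = json.Unmarshal(data, &got)
	fmt.Println(got.Spec, got.Payload["fiz"]) // @every 20m baz
}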
@@ -212,6 +212,41 @@ func TestWorkersKey(t *testing.T) {
 	}
 }

+func TestSchedulerEntriesKey(t *testing.T) {
+	tests := []struct {
+		schedulerID string
+		want        string
+	}{
+		{"localhost:9876:scheduler123", "asynq:schedulers:{localhost:9876:scheduler123}"},
+		{"127.0.0.1:1234:scheduler987", "asynq:schedulers:{127.0.0.1:1234:scheduler987}"},
+	}
+
+	for _, tc := range tests {
+		got := SchedulerEntriesKey(tc.schedulerID)
+		if got != tc.want {
+			t.Errorf("SchedulerEntriesKey(%q) = %q, want %q", tc.schedulerID, got, tc.want)
+		}
+	}
+}
+
+func TestSchedulerHistoryKey(t *testing.T) {
+	tests := []struct {
+		entryID string
+		want    string
+	}{
+		{"entry876", "asynq:scheduler_history:entry876"},
+		{"entry345", "asynq:scheduler_history:entry345"},
+	}
+
+	for _, tc := range tests {
+		got := SchedulerHistoryKey(tc.entryID)
+		if got != tc.want {
+			t.Errorf("SchedulerHistoryKey(%q) = %q, want %q",
+				tc.entryID, got, tc.want)
+		}
+	}
+}
+
 func TestUniqueKey(t *testing.T) {
 	tests := []struct {
 		desc string
@@ -758,7 +758,7 @@ return keys`)

 // ListServers returns the list of server info.
 func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
-	now := time.Now().UTC()
+	now := time.Now()
 	res, err := listServerKeysCmd.Run(r.client, []string{base.AllServers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err
@@ -791,7 +791,7 @@ return keys`)

 // ListWorkers returns the list of worker stats.
 func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
-	now := time.Now().UTC()
+	now := time.Now()
 	res, err := listWorkerKeysCmd.Run(r.client, []string{base.AllWorkers}, now.Unix()).Result()
 	if err != nil {
 		return nil, err
@@ -818,6 +818,63 @@ func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
 	return workers, nil
 }

+// Note: Script also removes stale keys.
+var listSchedulerKeysCmd = redis.NewScript(`
+local now = tonumber(ARGV[1])
+local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
+redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
+return keys`)
+
+// ListSchedulerEntries returns the list of scheduler entries.
+func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
+	now := time.Now()
+	res, err := listSchedulerKeysCmd.Run(r.client, []string{base.AllSchedulers}, now.Unix()).Result()
+	if err != nil {
+		return nil, err
+	}
+	keys, err := cast.ToStringSliceE(res)
+	if err != nil {
+		return nil, err
+	}
+	var entries []*base.SchedulerEntry
+	for _, key := range keys {
+		data, err := r.client.LRange(key, 0, -1).Result()
+		if err != nil {
+			continue // skip bad data
+		}
+		for _, s := range data {
+			var e base.SchedulerEntry
+			if err := json.Unmarshal([]byte(s), &e); err != nil {
+				continue // skip bad data
+			}
+			entries = append(entries, &e)
+		}
+	}
+	return entries, nil
+}
+
+// ListSchedulerEnqueueEvents returns the list of scheduler enqueue events.
+func (r *RDB) ListSchedulerEnqueueEvents(entryID string) ([]*base.SchedulerEnqueueEvent, error) {
+	key := base.SchedulerHistoryKey(entryID)
+	zs, err := r.client.ZRangeWithScores(key, 0, -1).Result()
+	if err != nil {
+		return nil, err
+	}
+	var events []*base.SchedulerEnqueueEvent
+	for _, z := range zs {
+		data, err := cast.ToStringE(z.Member)
+		if err != nil {
+			return nil, err
+		}
+		var e base.SchedulerEnqueueEvent
+		if err := json.Unmarshal([]byte(data), &e); err != nil {
+			return nil, err
+		}
+		events = append(events, &e)
+	}
+	return events, nil
+}
+
 // Pause pauses processing of tasks from the given queue.
 func (r *RDB) Pause(qname string) error {
 	key := base.PausedKey(qname)
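The script above relies on a convention set by WriteSchedulerEntries further down: each scheduler's key is a member of the asynq:schedulers ZSET whose score is its expiration Unix time, so members with a score >= now are alive and everything older can be pruned. A self-contained sketch of that expiry-scored-set pattern, using the same go-redis v7 style API as this diff (the example:schedulers key is made up for illustration):

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis/v7"
)

func main() {
	c := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	now := time.Now()

	// Register two "schedulers": one alive for 30s, one already expired.
	c.ZAdd("example:schedulers",
		&redis.Z{Score: float64(now.Add(30 * time.Second).Unix()), Member: "alive"},
		&redis.Z{Score: float64(now.Add(-30 * time.Second).Unix()), Member: "stale"},
	)

	// Live members have an expiration score in the future...
	live, _ := c.ZRangeByScore("example:schedulers", &redis.ZRangeBy{
		Min: fmt.Sprint(now.Unix()), Max: "+inf",
	}).Result()
	// ...and expired ones are pruned, mirroring listSchedulerKeysCmd.
	c.ZRemRangeByScore("example:schedulers", "-inf", fmt.Sprint(now.Unix()-1))

	fmt.Println(live) // [alive]
}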
@@ -2983,6 +2983,103 @@ func TestListWorkers(t *testing.T) {
 	}
 }

+func TestWriteListClearSchedulerEntries(t *testing.T) {
+	r := setup(t)
+	now := time.Now().UTC()
+	schedulerID := "127.0.0.1:9876:abc123"
+
+	data := []*base.SchedulerEntry{
+		&base.SchedulerEntry{
+			Spec:    "* * * * *",
+			Type:    "foo",
+			Payload: nil,
+			Opts:    "",
+			Next:    now.Add(5 * time.Hour),
+			Prev:    now.Add(-2 * time.Hour),
+		},
+		&base.SchedulerEntry{
+			Spec:    "@every 20m",
+			Type:    "bar",
+			Payload: map[string]interface{}{"fiz": "baz"},
+			Opts:    "",
+			Next:    now.Add(1 * time.Minute),
+			Prev:    now.Add(-19 * time.Minute),
+		},
+	}
+
+	if err := r.WriteSchedulerEntries(schedulerID, data, 30*time.Second); err != nil {
+		t.Fatalf("WriteSchedulerEntries failed: %v", err)
+	}
+	entries, err := r.ListSchedulerEntries()
+	if err != nil {
+		t.Fatalf("ListSchedulerEntries failed: %v", err)
+	}
+	if diff := cmp.Diff(data, entries, h.SortSchedulerEntryOpt); diff != "" {
+		t.Errorf("ListSchedulerEntries() = %v, want %v; (-want,+got)\n%s", entries, data, diff)
+	}
+	if err := r.ClearSchedulerEntries(schedulerID); err != nil {
+		t.Fatalf("ClearSchedulerEntries failed: %v", err)
+	}
+	entries, err = r.ListSchedulerEntries()
+	if err != nil {
+		t.Fatalf("ListSchedulerEntries() after clear failed: %v", err)
+	}
+	if len(entries) != 0 {
+		t.Errorf("found %d entries, want 0 after clearing", len(entries))
+	}
+}
+
+func TestSchedulerEnqueueEvents(t *testing.T) {
+	r := setup(t)
+
+	var (
+		now        = time.Now()
+		oneDayAgo  = now.Add(-24 * time.Hour)
+		oneHourAgo = now.Add(-1 * time.Hour)
+	)
+
+	type event struct {
+		entryID    string
+		taskID     string
+		enqueuedAt time.Time
+	}
+
+	tests := []struct {
+		entryID string
+		events  []*base.SchedulerEnqueueEvent
+	}{
+		{
+			entryID: "entry123",
+			events:  []*base.SchedulerEnqueueEvent{{"task123", oneDayAgo}, {"task456", oneHourAgo}},
+		},
+		{
+			entryID: "entry123",
+			events:  []*base.SchedulerEnqueueEvent{},
+		},
+	}
+
+loop:
+	for _, tc := range tests {
+		h.FlushDB(t, r.client)
+
+		for _, e := range tc.events {
+			if err := r.RecordSchedulerEnqueueEvent(tc.entryID, e); err != nil {
+				t.Errorf("RecordSchedulerEnqueueEvent(%q, %v) failed: %v", tc.entryID, e, err)
+				continue loop
+			}
+		}
+		got, err := r.ListSchedulerEnqueueEvents(tc.entryID)
+		if err != nil {
+			t.Errorf("ListSchedulerEnqueueEvents(%q) failed: %v", tc.entryID, err)
+			continue
+		}
+		if diff := cmp.Diff(tc.events, got, h.SortSchedulerEnqueueEventOpt, timeCmpOpt); diff != "" {
+			t.Errorf("ListSchedulerEnqueueEvents(%q) = %v, want %v; (-want,+got)\n%s",
+				tc.entryID, got, tc.events, diff)
+		}
+	}
+}
+
 func TestPause(t *testing.T) {
 	r := setup(t)

@@ -575,6 +575,45 @@ func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
 	return clearServerStateCmd.Run(r.client, []string{skey, wkey}).Err()
 }

+// KEYS[1]  -> asynq:schedulers:{<schedulerID>}
+// ARGV[1]  -> TTL in seconds
+// ARGV[2:] -> scheduler entries
+var writeSchedulerEntriesCmd = redis.NewScript(`
+redis.call("DEL", KEYS[1])
+for i = 2, #ARGV do
+	redis.call("LPUSH", KEYS[1], ARGV[i])
+end
+redis.call("EXPIRE", KEYS[1], ARGV[1])
+return redis.status_reply("OK")`)
+
+// WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl.
+func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
+	args := []interface{}{ttl.Seconds()}
+	for _, e := range entries {
+		bytes, err := json.Marshal(e)
+		if err != nil {
+			continue // skip bad data
+		}
+		args = append(args, bytes)
+	}
+	exp := time.Now().Add(ttl).UTC()
+	key := base.SchedulerEntriesKey(schedulerID)
+	err := r.client.ZAdd(base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
+	if err != nil {
+		return err
+	}
+	return writeSchedulerEntriesCmd.Run(r.client, []string{key}, args...).Err()
+}
+
+// ClearSchedulerEntries deletes scheduler entries data from redis.
+func (r *RDB) ClearSchedulerEntries(schedulerID string) error {
+	key := base.SchedulerEntriesKey(schedulerID)
+	if err := r.client.ZRem(base.AllSchedulers, key).Err(); err != nil {
+		return err
+	}
+	return r.client.Del(key).Err()
+}
+
 // CancelationPubSub returns a pubsub for cancelation messages.
 func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
 	pubsub := r.client.Subscribe(base.CancelChannel)
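WriteSchedulerEntries doubles as a liveness heartbeat: the ZSET score is the expiration time and the entries list carries a matching TTL, so a crashed scheduler's data disappears on its own. Presumably the scheduler rewrites its entries on an interval shorter than the TTL; a hedged sketch of such a loop as it might appear inside the repo (the heartbeat function, interval, and ttl values are illustrative, not part of this diff; the internal packages are only importable from within asynq itself):

package example

import (
	"log"
	"time"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

// heartbeat periodically re-publishes this scheduler's entries with a TTL,
// so the data expires on its own if the process dies. interval < ttl is essential.
func heartbeat(r *rdb.RDB, schedulerID string, entries []*base.SchedulerEntry, done <-chan struct{}) {
	const (
		ttl      = 30 * time.Second
		interval = 5 * time.Second // refresh well before the TTL lapses
	)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if err := r.WriteSchedulerEntries(schedulerID, entries, ttl); err != nil {
			log.Printf("could not write scheduler entries: %v", err)
		}
		select {
		case <-done:
			// Remove our data eagerly instead of waiting for the TTL.
			if err := r.ClearSchedulerEntries(schedulerID); err != nil {
				log.Printf("could not clear scheduler entries: %v", err)
			}
			return
		case <-ticker.C:
		}
	}
}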
@@ -590,3 +629,26 @@ func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
 func (r *RDB) PublishCancelation(id string) error {
 	return r.client.Publish(base.CancelChannel, id).Err()
 }
+
+// KEYS[1] -> asynq:scheduler_history:<entryID>
+// ARGV[1] -> enqueued_at timestamp
+// ARGV[2] -> serialized SchedulerEnqueueEvent data
+// ARGV[3] -> max number of events to be persisted
+var recordSchedulerEnqueueEventCmd = redis.NewScript(`
+redis.call("ZADD", KEYS[1], ARGV[1], ARGV[2])
+redis.call("ZREMRANGEBYRANK", KEYS[1], 0, -tonumber(ARGV[3])-1) -- keep only the newest ARGV[3] events
+return redis.status_reply("OK")`)
+
+// Maximum number of enqueue events to store per entry.
+const maxEvents = 10000
+
+// RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
+func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
+	key := base.SchedulerHistoryKey(entryID)
+	data, err := json.Marshal(event)
+	if err != nil {
+		return err
+	}
+	return recordSchedulerEnqueueEventCmd.Run(
+		r.client, []string{key}, event.EnqueuedAt.Unix(), data, maxEvents).Err()
+}
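Together with ListSchedulerEnqueueEvents above, this gives each entry an append-and-trim history: a ZSET at asynq:scheduler_history:<entryID>, scored by enqueue time and capped at maxEvents. A small usage sketch of the write/read pair, as it might appear inside the repo (the recordAndList helper is illustrative):

package example

import (
	"fmt"
	"time"

	"github.com/hibiken/asynq/internal/base"
	"github.com/hibiken/asynq/internal/rdb"
)

// recordAndList shows the write/read pair for an entry's enqueue history.
func recordAndList(r *rdb.RDB, entryID, taskID string) error {
	ev := &base.SchedulerEnqueueEvent{TaskID: taskID, EnqueuedAt: time.Now()}
	if err := r.RecordSchedulerEnqueueEvent(entryID, ev); err != nil {
		return err
	}
	events, err := r.ListSchedulerEnqueueEvents(entryID)
	if err != nil {
		return err
	}
	for _, e := range events {
		fmt.Printf("task %s enqueued at %s\n", e.TaskID, e.EnqueuedAt)
	}
	return nil
}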