Mirror of https://github.com/hibiken/asynq.git

Rename internal ProcessState to ServerState

Author: Ken Hibino
Date:   2020-04-12 16:42:11 -07:00
Commit: aafd8a5b74 (parent: 4f11e52558)

11 changed files with 180 additions and 168 deletions
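
For orientation, a rough sketch of the renamed types as this diff uses them. The field set is inferred from the hunks below; the actual definitions live in internal/base and are not part of this commit view, so treat the names and layout here as assumptions:

// Sketch reconstructed from identifiers in this diff, not copied from internal/base.
package base

import (
    "sync"
    "time"
)

// ServerInfo (formerly ProcessInfo) is the snapshot JSON-encoded into redis.
type ServerInfo struct {
    Host              string
    PID               int
    ServerID          string // new with this rename; the tests treat it as generated at runtime
    Concurrency       int
    Queues            map[string]int
    StrictPriority    bool
    Status            string
    Started           time.Time
    ActiveWorkerCount int
}

// ServerState (formerly ProcessState) is the mutable state of a running server.
type ServerState struct {
    mu   sync.Mutex
    info ServerInfo
    // worker stats elided
}

// GetInfo (formerly Get) returns a copy of the current state.
func (ss *ServerState) GetInfo() *ServerInfo {
    ss.mu.Lock()
    defer ss.mu.Unlock()
    info := ss.info
    return &info
}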

---- changed file (name not captured in this view) ----

@@ -758,6 +758,7 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
     return nil
 }

+// TODO: Rename this to listServerInfo.
 // Note: Script also removes stale keys.
 var listProcessesCmd = redis.NewScript(`
 local res = {}
@@ -773,9 +774,9 @@ redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
 return res`)

 // ListProcesses returns the list of process statuses.
-func (r *RDB) ListProcesses() ([]*base.ProcessInfo, error) {
+func (r *RDB) ListProcesses() ([]*base.ServerInfo, error) {
     res, err := listProcessesCmd.Run(r.client,
-        []string{base.AllProcesses}, time.Now().UTC().Unix()).Result()
+        []string{base.AllServers}, time.Now().UTC().Unix()).Result()
     if err != nil {
         return nil, err
     }
@@ -783,9 +784,9 @@ func (r *RDB) ListProcesses() ([]*base.ProcessInfo, error) {
     if err != nil {
         return nil, err
     }
-    var processes []*base.ProcessInfo
+    var processes []*base.ServerInfo
     for _, s := range data {
-        var ps base.ProcessInfo
+        var ps base.ServerInfo
         err := json.Unmarshal([]byte(s), &ps)
         if err != nil {
             continue // skip bad data
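
A hedged usage sketch of the API after this hunk: ListProcesses keeps its old name for now (see the TODO above) but returns the renamed type. The setup of r and the imports (fmt, log) are assumed:

// Assumes r is an *rdb.RDB wired to a redis client, as elsewhere in this package.
infos, err := r.ListProcesses() // now []*base.ServerInfo
if err != nil {
    log.Fatal(err)
}
for _, info := range infos {
    fmt.Printf("%s:%d server=%s status=%s active=%d\n",
        info.Host, info.PID, info.ServerID, info.Status, info.ActiveWorkerCount)
}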

---- changed file (name not captured in this view) ----

@@ -2055,10 +2055,10 @@ func TestListProcesses(t *testing.T) {
     r := setup(t)

     started1 := time.Now().Add(-time.Hour)
-    ps1 := base.NewProcessState("do.droplet1", 1234, 10, map[string]int{"default": 1}, false)
-    ps1.SetStarted(started1)
-    ps1.SetStatus(base.StatusRunning)
-    info1 := &base.ProcessInfo{
+    ss1 := base.NewServerState("do.droplet1", 1234, 10, map[string]int{"default": 1}, false)
+    ss1.SetStarted(started1)
+    ss1.SetStatus(base.StatusRunning)
+    info1 := &base.ServerInfo{
         Concurrency: 10,
         Queues:      map[string]int{"default": 1},
         Host:        "do.droplet1",
@@ -2069,11 +2069,11 @@ func TestListProcesses(t *testing.T) {
     }

     started2 := time.Now().Add(-2 * time.Hour)
-    ps2 := base.NewProcessState("do.droplet2", 9876, 20, map[string]int{"email": 1}, false)
-    ps2.SetStarted(started2)
-    ps2.SetStatus(base.StatusStopped)
-    ps2.AddWorkerStats(h.NewTaskMessage("send_email", nil), time.Now())
-    info2 := &base.ProcessInfo{
+    ss2 := base.NewServerState("do.droplet2", 9876, 20, map[string]int{"email": 1}, false)
+    ss2.SetStarted(started2)
+    ss2.SetStatus(base.StatusStopped)
+    ss2.AddWorkerStats(h.NewTaskMessage("send_email", nil), time.Now())
+    info2 := &base.ServerInfo{
         Concurrency: 20,
         Queues:      map[string]int{"email": 1},
         Host:        "do.droplet2",
@@ -2084,30 +2084,31 @@ func TestListProcesses(t *testing.T) {
     }

     tests := []struct {
-        processes []*base.ProcessState
-        want      []*base.ProcessInfo
+        serverStates []*base.ServerState
+        want         []*base.ServerInfo
     }{
         {
-            processes: []*base.ProcessState{},
-            want:      []*base.ProcessInfo{},
+            serverStates: []*base.ServerState{},
+            want:         []*base.ServerInfo{},
         },
         {
-            processes: []*base.ProcessState{ps1},
-            want:      []*base.ProcessInfo{info1},
+            serverStates: []*base.ServerState{ss1},
+            want:         []*base.ServerInfo{info1},
         },
         {
-            processes: []*base.ProcessState{ps1, ps2},
-            want:      []*base.ProcessInfo{info1, info2},
+            serverStates: []*base.ServerState{ss1, ss2},
+            want:         []*base.ServerInfo{info1, info2},
         },
     }

-    ignoreOpt := cmpopts.IgnoreUnexported(base.ProcessInfo{})
+    ignoreOpt := cmpopts.IgnoreUnexported(base.ServerInfo{})
+    ignoreFieldOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID")

     for _, tc := range tests {
         h.FlushDB(t, r.client)

-        for _, ps := range tc.processes {
-            if err := r.WriteProcessState(ps, 5*time.Second); err != nil {
+        for _, ss := range tc.serverStates {
+            if err := r.WriteServerState(ss, 5*time.Second); err != nil {
                 t.Fatal(err)
             }
         }
@@ -2116,9 +2117,9 @@ func TestListProcesses(t *testing.T) {
         if err != nil {
             t.Errorf("r.ListProcesses returned an error: %v", err)
         }
-        if diff := cmp.Diff(tc.want, got, h.SortProcessInfoOpt, ignoreOpt); diff != "" {
+        if diff := cmp.Diff(tc.want, got, h.SortServerInfoOpt, ignoreOpt, ignoreFieldOpt); diff != "" {
             t.Errorf("r.ListProcesses returned %v, want %v; (-want,+got)\n%s",
-                got, tc.processes, diff)
+                got, tc.serverStates, diff)
         }
     }
 }
@@ -2164,13 +2165,13 @@ func TestListWorkers(t *testing.T) {
     for _, tc := range tests {
         h.FlushDB(t, r.client)

-        ps := base.NewProcessState(host, pid, 10, map[string]int{"default": 1}, false)
+        ss := base.NewServerState(host, pid, 10, map[string]int{"default": 1}, false)
         for _, w := range tc.workers {
-            ps.AddWorkerStats(w.msg, w.started)
+            ss.AddWorkerStats(w.msg, w.started)
         }

-        err := r.WriteProcessState(ps, time.Minute)
+        err := r.WriteServerState(ss, time.Minute)
         if err != nil {
             t.Errorf("could not write process state to redis: %v", err)
             continue
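
The tests above compare ServerInfo values while skipping the generated ServerID. A minimal, self-contained illustration of that go-cmp pattern; the values here are hypothetical, not taken from the asynq tests:

package example

import (
    "testing"

    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"

    "github.com/hibiken/asynq/internal/base"
)

func TestIgnoreServerID(t *testing.T) {
    want := &base.ServerInfo{Host: "do.droplet1", PID: 1234}
    got := &base.ServerInfo{Host: "do.droplet1", PID: 1234, ServerID: "assigned-at-runtime"}
    opts := []cmp.Option{
        cmpopts.IgnoreUnexported(base.ServerInfo{}),         // as in TestListProcesses above
        cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID"), // ServerID differs run to run
    }
    if diff := cmp.Diff(want, got, opts...); diff != "" {
        t.Errorf("ServerInfo mismatch; (-want,+got)\n%s", diff)
    }
}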

---- changed file (name not captured in this view) ----

@@ -463,9 +463,9 @@ func (r *RDB) forwardSingle(src, dst string) error {
         []string{src, dst}, now).Err()
 }

-// KEYS[1] -> asynq:ps:<host:pid>
-// KEYS[2] -> asynq:ps
-// KEYS[3] -> asynq:workers<host:pid>
+// KEYS[1] -> asynq:servers:<host:pid:sid>
+// KEYS[2] -> asynq:servers
+// KEYS[3] -> asynq:workers<host:pid:sid>
 // keys[4] -> asynq:workers
 // ARGV[1] -> expiration time
 // ARGV[2] -> TTL in seconds
@@ -486,7 +486,7 @@ return redis.status_reply("OK")`)
 // WriteServerState writes server state data to redis with expiration set to the value ttl.
 func (r *RDB) WriteServerState(ss *base.ServerState, ttl time.Duration) error {
-    info := ss.Get()
+    info := ss.GetInfo()
     bytes, err := json.Marshal(info)
     if err != nil {
         return err
@@ -502,17 +502,17 @@ func (r *RDB) WriteServerState(ss *base.ServerState, ttl time.Duration) error {
         }
         args = append(args, w.ID.String(), bytes)
     }
-    pkey := base.ProcessInfoKey(info.Host, info.PID)
-    wkey := base.WorkersKey(info.Host, info.PID)
+    skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
+    wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
     return writeProcessInfoCmd.Run(r.client,
-        []string{pkey, base.AllProcesses, wkey, base.AllWorkers},
+        []string{skey, base.AllServers, wkey, base.AllWorkers},
         args...).Err()
 }

-// KEYS[1] -> asynq:ps
-// KEYS[2] -> asynq:ps:<host:pid>
+// KEYS[1] -> asynq:servers
+// KEYS[2] -> asynq:servers:<host:pid:sid>
 // KEYS[3] -> asynq:workers
-// KEYS[4] -> asynq:workers<host:pid>
+// KEYS[4] -> asynq:workers<host:pid:sid>
 var clearProcessInfoCmd = redis.NewScript(`
 redis.call("ZREM", KEYS[1], KEYS[2])
 redis.call("DEL", KEYS[2])
@@ -522,12 +522,12 @@ return redis.status_reply("OK")`)
 // ClearServerState deletes server state data from redis.
 func (r *RDB) ClearServerState(ss *base.ServerState) error {
-    info := ss.Get()
-    host, pid := info.Host, info.PID
-    pkey := base.ProcessInfoKey(host, pid)
-    wkey := base.WorkersKey(host, pid)
+    info := ss.GetInfo()
+    host, pid, id := info.Host, info.PID, info.ServerID
+    skey := base.ServerInfoKey(host, pid, id)
+    wkey := base.WorkersKey(host, pid, id)
     return clearProcessInfoCmd.Run(r.client,
-        []string{base.AllProcesses, pkey, base.AllWorkers, wkey}).Err()
+        []string{base.AllServers, skey, base.AllWorkers, wkey}).Err()
 }

 // CancelationPubSub returns a pubsub for cancelation messages.
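
The hunks above call base.ServerInfoKey and base.WorkersKey with the new (host, pid, serverID) arguments but do not show their bodies. A plausible sketch, assuming the format strings follow the key comments above (the exact formats, including separators, are an assumption):

// Sketch of the key helpers implied by this diff; not copied from internal/base.
const (
    AllServers = "asynq:servers" // ZSET of all server info keys (was asynq:ps)
    AllWorkers = "asynq:workers" // ZSET of all workers keys
)

// ServerInfoKey builds a key like "asynq:servers:<host>:<pid>:<serverID>".
func ServerInfoKey(hostname string, pid int, serverID string) string {
    return fmt.Sprintf("asynq:servers:%s:%d:%s", hostname, pid, serverID)
}

// WorkersKey builds the per-server workers hash key.
func WorkersKey(hostname string, pid int, serverID string) string {
    return fmt.Sprintf("asynq:workers:%s:%d:%s", hostname, pid, serverID)
}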

---- changed file (name not captured in this view) ----

@@ -862,60 +862,61 @@ func TestCheckAndEnqueue(t *testing.T) {
     }
 }

-func TestWriteProcessState(t *testing.T) {
+func TestWriteServerState(t *testing.T) {
     r := setup(t)
-    host, pid := "localhost", 98765
     queues := map[string]int{"default": 2, "email": 5, "low": 1}
     started := time.Now()
-    ps := base.NewProcessState(host, pid, 10, queues, false)
-    ps.SetStarted(started)
-    ps.SetStatus(base.StatusRunning)
+    ss := base.NewServerState("localhost", 4242, 10, queues, false)
+    ss.SetStarted(started)
+    ss.SetStatus(base.StatusRunning)
     ttl := 5 * time.Second

     h.FlushDB(t, r.client)

-    err := r.WriteProcessState(ps, ttl)
+    err := r.WriteServerState(ss, ttl)
     if err != nil {
-        t.Errorf("r.WriteProcessState returned an error: %v", err)
+        t.Errorf("r.WriteServerState returned an error: %v", err)
     }

-    // Check ProcessInfo was written correctly
-    pkey := base.ProcessInfoKey(host, pid)
-    data := r.client.Get(pkey).Val()
-    var got base.ProcessInfo
+    // Check ServerInfo was written correctly
+    info := ss.GetInfo()
+    skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
+    data := r.client.Get(skey).Val()
+    var got base.ServerInfo
     err = json.Unmarshal([]byte(data), &got)
     if err != nil {
         t.Fatalf("could not decode json: %v", err)
     }
-    want := base.ProcessInfo{
-        Host:              "localhost",
-        PID:               98765,
-        Concurrency:       10,
+    want := base.ServerInfo{
+        Host:              info.Host,
+        PID:               info.PID,
+        Concurrency:       info.Concurrency,
         Queues:            map[string]int{"default": 2, "email": 5, "low": 1},
         StrictPriority:    false,
         Status:            "running",
         Started:           started,
         ActiveWorkerCount: 0,
     }
-    if diff := cmp.Diff(want, got); diff != "" {
-        t.Errorf("persisted ProcessInfo was %v, want %v; (-want,+got)\n%s",
+    ignoreOpt := cmpopts.IgnoreFields(base.ServerInfo{}, "ServerID")
+    if diff := cmp.Diff(want, got, ignoreOpt); diff != "" {
+        t.Errorf("persisted ServerInfo was %v, want %v; (-want,+got)\n%s",
             got, want, diff)
     }

-    // Check ProcessInfo TTL was set correctly
-    gotTTL := r.client.TTL(pkey).Val()
+    // Check ServerInfo TTL was set correctly
+    gotTTL := r.client.TTL(skey).Val()
     if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
-        t.Errorf("TTL of %q was %v, want %v", pkey, gotTTL, ttl)
+        t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl)
     }

-    // Check ProcessInfo key was added to the set correctly
-    gotProcesses := r.client.ZRange(base.AllProcesses, 0, -1).Val()
-    wantProcesses := []string{pkey}
+    // Check ServerInfo key was added to the set correctly
+    gotProcesses := r.client.ZRange(base.AllServers, 0, -1).Val()
+    wantProcesses := []string{skey}
     if diff := cmp.Diff(wantProcesses, gotProcesses); diff != "" {
-        t.Errorf("%q contained %v, want %v", base.AllProcesses, gotProcesses, wantProcesses)
+        t.Errorf("%q contained %v, want %v", base.AllServers, gotProcesses, wantProcesses)
     }

     // Check WorkersInfo was written correctly
-    wkey := base.WorkersKey(host, pid)
+    wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
     workerExist := r.client.Exists(wkey).Val()
     if workerExist != 0 {
         t.Errorf("%q key exists", wkey)
@@ -928,9 +929,8 @@ func TestWriteProcessState(t *testing.T) {
     }
 }

-func TestWriteProcessStateWithWorkers(t *testing.T) {
+func TestWriteServerStateWithWorkers(t *testing.T) {
     r := setup(t)
-    host, pid := "localhost", 98765
     queues := map[string]int{"default": 2, "email": 5, "low": 1}
     concurrency := 10
@@ -939,31 +939,33 @@ func TestWriteProcessStateWithWorkers(t *testing.T) {
     w2Started := time.Now().Add(-time.Second)
     msg1 := h.NewTaskMessage("send_email", map[string]interface{}{"user_id": "123"})
     msg2 := h.NewTaskMessage("gen_thumbnail", map[string]interface{}{"path": "some/path/to/imgfile"})
-    ps := base.NewProcessState(host, pid, concurrency, queues, false)
-    ps.SetStarted(started)
-    ps.SetStatus(base.StatusRunning)
-    ps.AddWorkerStats(msg1, w1Started)
-    ps.AddWorkerStats(msg2, w2Started)
+    ss := base.NewServerState("127.0.01", 4242, concurrency, queues, false)
+    ss.SetStarted(started)
+    ss.SetStatus(base.StatusRunning)
+    ss.AddWorkerStats(msg1, w1Started)
+    ss.AddWorkerStats(msg2, w2Started)
     ttl := 5 * time.Second

     h.FlushDB(t, r.client)

-    err := r.WriteProcessState(ps, ttl)
+    err := r.WriteServerState(ss, ttl)
     if err != nil {
-        t.Errorf("r.WriteProcessState returned an error: %v", err)
+        t.Errorf("r.WriteServerState returned an error: %v", err)
     }

-    // Check ProcessInfo was written correctly
-    pkey := base.ProcessInfoKey(host, pid)
-    data := r.client.Get(pkey).Val()
-    var got base.ProcessInfo
+    // Check ServerInfo was written correctly
+    info := ss.GetInfo()
+    skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
+    data := r.client.Get(skey).Val()
+    var got base.ServerInfo
     err = json.Unmarshal([]byte(data), &got)
     if err != nil {
         t.Fatalf("could not decode json: %v", err)
     }
-    want := base.ProcessInfo{
-        Host:              host,
-        PID:               pid,
+    want := base.ServerInfo{
+        Host:              info.Host,
+        PID:               info.PID,
+        ServerID:          info.ServerID,
         Concurrency:       concurrency,
         Queues:            queues,
         StrictPriority:    false,
@@ -972,23 +974,23 @@ func TestWriteProcessStateWithWorkers(t *testing.T) {
         ActiveWorkerCount: 2,
     }
     if diff := cmp.Diff(want, got); diff != "" {
-        t.Errorf("persisted ProcessInfo was %v, want %v; (-want,+got)\n%s",
+        t.Errorf("persisted ServerInfo was %v, want %v; (-want,+got)\n%s",
             got, want, diff)
     }

-    // Check ProcessInfo TTL was set correctly
-    gotTTL := r.client.TTL(pkey).Val()
+    // Check ServerInfo TTL was set correctly
+    gotTTL := r.client.TTL(skey).Val()
     if !cmp.Equal(ttl.Seconds(), gotTTL.Seconds(), cmpopts.EquateApprox(0, 1)) {
-        t.Errorf("TTL of %q was %v, want %v", pkey, gotTTL, ttl)
+        t.Errorf("TTL of %q was %v, want %v", skey, gotTTL, ttl)
     }

-    // Check ProcessInfo key was added to the set correctly
-    gotProcesses := r.client.ZRange(base.AllProcesses, 0, -1).Val()
-    wantProcesses := []string{pkey}
+    // Check ServerInfo key was added to the set correctly
+    gotProcesses := r.client.ZRange(base.AllServers, 0, -1).Val()
+    wantProcesses := []string{skey}
     if diff := cmp.Diff(wantProcesses, gotProcesses); diff != "" {
-        t.Errorf("%q contained %v, want %v", base.AllProcesses, gotProcesses, wantProcesses)
+        t.Errorf("%q contained %v, want %v", base.AllServers, gotProcesses, wantProcesses)
     }

     // Check WorkersInfo was written correctly
-    wkey := base.WorkersKey(host, pid)
+    wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
     wdata := r.client.HGetAll(wkey).Val()
     if len(wdata) != 2 {
         t.Fatalf("HGETALL %q returned a hash of size %d, want 2", wkey, len(wdata))
@@ -1003,8 +1005,8 @@ func TestWriteProcessStateWithWorkers(t *testing.T) {
     }
     wantWorkers := map[string]*base.WorkerInfo{
         msg1.ID.String(): {
-            Host:  host,
-            PID:   pid,
+            Host:  info.Host,
+            PID:   info.PID,
             ID:    msg1.ID,
             Type:  msg1.Type,
             Queue: msg1.Queue,
@@ -1012,8 +1014,8 @@ func TestWriteProcessStateWithWorkers(t *testing.T) {
             Started: w1Started,
         },
         msg2.ID.String(): {
-            Host:  host,
-            PID:   pid,
+            Host:  info.Host,
+            PID:   info.PID,
             ID:    msg2.ID,
             Type:  msg2.Type,
             Queue: msg2.Queue,
@@ -1039,27 +1041,28 @@ func TestWriteProcessStateWithWorkers(t *testing.T) {
     }
 }

-func TestClearProcessState(t *testing.T) {
+func TestClearServerState(t *testing.T) {
     r := setup(t)
-    host, pid := "127.0.0.1", 1234
+    ss := base.NewServerState("127.0.01", 4242, 10, map[string]int{"default": 1}, false)
+    info := ss.GetInfo()

     h.FlushDB(t, r.client)

-    pkey := base.ProcessInfoKey(host, pid)
-    wkey := base.WorkersKey(host, pid)
-    otherPKey := base.ProcessInfoKey("otherhost", 12345)
-    otherWKey := base.WorkersKey("otherhost", 12345)
+    skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
+    wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
+    otherSKey := base.ServerInfoKey("otherhost", 12345, "server98")
+    otherWKey := base.WorkersKey("otherhost", 12345, "server98")
     // Populate the keys.
-    if err := r.client.Set(pkey, "process-info", 0).Err(); err != nil {
+    if err := r.client.Set(skey, "process-info", 0).Err(); err != nil {
         t.Fatal(err)
     }
     if err := r.client.HSet(wkey, "worker-key", "worker-info").Err(); err != nil {
         t.Fatal(err)
     }
-    if err := r.client.ZAdd(base.AllProcesses, &redis.Z{Member: pkey}).Err(); err != nil {
+    if err := r.client.ZAdd(base.AllServers, &redis.Z{Member: skey}).Err(); err != nil {
         t.Fatal(err)
     }
-    if err := r.client.ZAdd(base.AllProcesses, &redis.Z{Member: otherPKey}).Err(); err != nil {
+    if err := r.client.ZAdd(base.AllServers, &redis.Z{Member: otherSKey}).Err(); err != nil {
         t.Fatal(err)
     }
     if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Member: wkey}).Err(); err != nil {
@@ -1069,24 +1072,22 @@ func TestClearProcessState(t *testing.T) {
         t.Fatal(err)
     }

-    ps := base.NewProcessState(host, pid, 10, map[string]int{"default": 1}, false)
-    err := r.ClearProcessState(ps)
+    err := r.ClearServerState(ss)
     if err != nil {
-        t.Fatalf("(*RDB).ClearProcessState failed: %v", err)
+        t.Fatalf("(*RDB).ClearServerState failed: %v", err)
     }

     // Check all keys are cleared
-    if r.client.Exists(pkey).Val() != 0 {
-        t.Errorf("Redis key %q exists", pkey)
+    if r.client.Exists(skey).Val() != 0 {
+        t.Errorf("Redis key %q exists", skey)
     }
     if r.client.Exists(wkey).Val() != 0 {
         t.Errorf("Redis key %q exists", wkey)
     }
-    gotProcessKeys := r.client.ZRange(base.AllProcesses, 0, -1).Val()
-    wantProcessKeys := []string{otherPKey}
+    gotProcessKeys := r.client.ZRange(base.AllServers, 0, -1).Val()
+    wantProcessKeys := []string{otherSKey}
     if diff := cmp.Diff(wantProcessKeys, gotProcessKeys); diff != "" {
-        t.Errorf("%q contained %v, want %v", base.AllProcesses, gotProcessKeys, wantProcessKeys)
+        t.Errorf("%q contained %v, want %v", base.AllServers, gotProcessKeys, wantProcessKeys)
     }
     gotWorkerKeys := r.client.ZRange(base.AllWorkers, 0, -1).Val()
     wantWorkerKeys := []string{otherWKey}
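
Taken together, the tests sketch the lifecycle a server would follow with the renamed API. Roughly, under the same assumptions as above (r is an *rdb.RDB; imports and error handling elided):

ss := base.NewServerState("localhost", os.Getpid(), 10, map[string]int{"default": 1}, false)
ss.SetStarted(time.Now())
ss.SetStatus(base.StatusRunning)

// Heartbeat: persist the state with a TTL so that dead servers expire from redis.
if err := r.WriteServerState(ss, 5*time.Second); err != nil {
    // handle error
}

// Shutdown: remove this server's keys; other servers' keys are left intact.
if err := r.ClearServerState(ss); err != nil {
    // handle error
}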