Mirror of https://github.com/hibiken/asynq.git (synced 2024-12-25 23:32:17 +08:00)
Refactor redis keys and store messages in protobuf

Changes:
- Task messages are stored under the "asynq:{<qname>}:t:<task_id>" key in redis. The value is a HASH; the message itself is stored under the "msg" field, and the hash also stores "deadline" and "timeout" fields.
- Redis LIST and ZSET entries store task message IDs only.
- Task messages are serialized using protocol buffers.
This commit is contained in:
parent 2516c4baba
commit 7af3981929
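A rough sketch of the storage layout this commit introduces (not code from the commit itself; the queue name, task ID, and field values below are made up, and a local redis plus go-redis v7 are assumed): each task gets its own HASH keyed by asynq:{<qname>}:t:<task_id>, while the pending LIST carries only the task ID.

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local redis

	qname := "default"                               // hypothetical queue name
	taskID := "d7025d55-3e0f-4037-ac17-12d64e305b42" // hypothetical task ID

	// Per-task HASH: asynq:{<qname>}:t:<task_id> holding "msg", "timeout" and "deadline".
	taskKey := fmt.Sprintf("asynq:{%s}:t:%s", qname, taskID)
	if err := rdb.HSet(taskKey, map[string]interface{}{
		"msg":      "<protobuf-encoded TaskMessage>", // placeholder for the real encoded bytes
		"timeout":  1800,
		"deadline": 0,
	}).Err(); err != nil {
		panic(err)
	}

	// Pending LIST: only the task ID is pushed; the message lives in the HASH above.
	if err := rdb.LPush(fmt.Sprintf("asynq:{%s}:pending", qname), taskID).Err(); err != nil {
		panic(err)
	}
}

Scheduled, retry, and archived sets follow the same idea, with the task ID as the ZSET member and the score carrying the relevant timestamp.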
.gitignore (vendored, 3 changes)
@@ -19,3 +19,6 @@
 # Ignore asynq config file
 .asynq.*
+
+# Ignore editor config files
+.vscode
@@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
 
+### Changed
+
+- Requires redis v4.0+ for multiple field/value pair support
+- Renamed pending key (TODO: need migration script)
+
 ## [0.17.2] - 2021-06-06
 
 ### Fixed
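The redis v4.0+ requirement noted above comes from writing several hash fields in a single HSET call, which older servers only accept via HMSET. A minimal go-redis v7 sketch (the key and field values are illustrative, not taken from this commit):

package example

import "github.com/go-redis/redis/v7"

// storeTaskHash writes msg/timeout/deadline in one HSET round trip;
// the multi field/value form of HSET needs redis 4.0 or newer.
func storeTaskHash(rdb redis.UniversalClient, taskKey string, encoded []byte, timeout, deadline int64) error {
	return rdb.HSet(taskKey, map[string]interface{}{
		"msg":      encoded,
		"timeout":  timeout,
		"deadline": deadline,
	}).Err()
}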
Makefile (new file, 7 changes)
@@ -0,0 +1,7 @@
+ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+
+proto: internal/proto/asynq.proto
+	protoc -I=$(ROOT_DIR)/internal/proto \
+		--go_out=$(ROOT_DIR)/internal/proto \
+		--go_opt=module=github.com/hibiken/asynq/internal/proto \
+		$(ROOT_DIR)/internal/proto/asynq.proto
@@ -120,7 +120,7 @@ func TestClientEnqueueWithProcessAtOption(t *testing.T) {
 	for qname, want := range tc.wantPending {
 		gotPending := h.GetPendingMessages(t, r, qname)
 		if diff := cmp.Diff(want, gotPending, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
-			t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
+			t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
 		}
 	}
 	for qname, want := range tc.wantScheduled {
@@ -379,7 +379,7 @@ func TestClientEnqueue(t *testing.T) {
 	for qname, want := range tc.wantPending {
 		got := h.GetPendingMessages(t, r, qname)
 		if diff := cmp.Diff(want, got, h.IgnoreIDOpt); diff != "" {
-			t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
+			t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
 		}
 	}
 }
@@ -484,7 +484,7 @@ func TestClientEnqueueWithProcessInOption(t *testing.T) {
 	for qname, want := range tc.wantPending {
 		gotPending := h.GetPendingMessages(t, r, qname)
 		if diff := cmp.Diff(want, gotPending, h.IgnoreIDOpt, cmpopts.EquateEmpty()); diff != "" {
-			t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.QueueKey(qname), diff)
+			t.Errorf("%s;\nmismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff)
 		}
 	}
 	for qname, want := range tc.wantScheduled {
@@ -69,7 +69,7 @@ func (f *forwarder) start(wg *sync.WaitGroup) {
 }
 
 func (f *forwarder) exec() {
-	if err := f.broker.CheckAndEnqueue(f.queues...); err != nil {
+	if err := f.broker.ForwardIfReady(f.queues...); err != nil {
 		f.logger.Errorf("Could not enqueue scheduled tasks: %v", err)
 	}
 }
@@ -130,7 +130,7 @@ func TestForwarder(t *testing.T) {
 	for qname, want := range tc.wantPending {
 		gotPending := h.GetPendingMessages(t, r, qname)
 		if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
-			t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.QueueKey(qname), diff)
+			t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.PendingKey(qname), diff)
 		}
 	}
 }
go.mod (6 changes)
@@ -4,12 +4,14 @@ go 1.13
 
 require (
 	github.com/go-redis/redis/v7 v7.4.0
-	github.com/google/go-cmp v0.4.0
-	github.com/google/uuid v1.1.1
+	github.com/golang/protobuf v1.4.1
+	github.com/google/go-cmp v0.5.0
+	github.com/google/uuid v1.2.0
 	github.com/robfig/cron/v3 v3.0.1
 	github.com/spf13/cast v1.3.1
 	go.uber.org/goleak v0.10.0
 	golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e
 	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
+	google.golang.org/protobuf v1.25.0
 	gopkg.in/yaml.v2 v2.2.7 // indirect
 )
go.sum (58 changes)
@@ -1,18 +1,40 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
 github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
 github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
 github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -27,6 +49,7 @@ github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
@@ -36,11 +59,23 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
 go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
 go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
 golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -54,8 +89,29 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -68,3 +124,5 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
 gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -548,11 +548,12 @@ func (i *Inspector) DeleteAllArchivedTasks(qname string) (int, error) {
 }
 
 // DeleteTaskByKey deletes a task with the given key from the given queue.
+// TODO: We don't need score any more. Update this to delete task by ID
 func (i *Inspector) DeleteTaskByKey(qname, key string) error {
 	if err := base.ValidateQueueName(qname); err != nil {
 		return err
 	}
-	prefix, id, score, err := parseTaskKey(key)
+	prefix, id, _, err := parseTaskKey(key)
 	if err != nil {
 		return err
 	}
@@ -560,11 +561,11 @@ func (i *Inspector) DeleteTaskByKey(qname, key string) error {
 	case keyPrefixPending:
 		return i.rdb.DeletePendingTask(qname, id)
 	case keyPrefixScheduled:
-		return i.rdb.DeleteScheduledTask(qname, id, score)
+		return i.rdb.DeleteScheduledTask(qname, id)
 	case keyPrefixRetry:
-		return i.rdb.DeleteRetryTask(qname, id, score)
+		return i.rdb.DeleteRetryTask(qname, id)
 	case keyPrefixArchived:
-		return i.rdb.DeleteArchivedTask(qname, id, score)
+		return i.rdb.DeleteArchivedTask(qname, id)
 	default:
 		return fmt.Errorf("invalid key")
 	}
@@ -601,21 +602,22 @@ func (i *Inspector) RunAllArchivedTasks(qname string) (int, error) {
 }
 
 // RunTaskByKey transition a task to pending state given task key and queue name.
+// TODO: Update this to run task by ID.
 func (i *Inspector) RunTaskByKey(qname, key string) error {
 	if err := base.ValidateQueueName(qname); err != nil {
 		return err
 	}
-	prefix, id, score, err := parseTaskKey(key)
+	prefix, id, _, err := parseTaskKey(key)
 	if err != nil {
 		return err
 	}
 	switch prefix {
 	case keyPrefixScheduled:
-		return i.rdb.RunScheduledTask(qname, id, score)
+		return i.rdb.RunScheduledTask(qname, id)
 	case keyPrefixRetry:
-		return i.rdb.RunRetryTask(qname, id, score)
+		return i.rdb.RunRetryTask(qname, id)
 	case keyPrefixArchived:
-		return i.rdb.RunArchivedTask(qname, id, score)
+		return i.rdb.RunArchivedTask(qname, id)
 	case keyPrefixPending:
 		return fmt.Errorf("task is already pending for run")
 	default:
@@ -654,11 +656,12 @@ func (i *Inspector) ArchiveAllRetryTasks(qname string) (int, error) {
 }
 
 // ArchiveTaskByKey archives a task with the given key in the given queue.
+// TODO: Update this to Archive task by ID.
 func (i *Inspector) ArchiveTaskByKey(qname, key string) error {
 	if err := base.ValidateQueueName(qname); err != nil {
 		return err
 	}
-	prefix, id, score, err := parseTaskKey(key)
+	prefix, id, _, err := parseTaskKey(key)
 	if err != nil {
 		return err
 	}
@@ -666,9 +669,9 @@ func (i *Inspector) ArchiveTaskByKey(qname, key string) error {
 	case keyPrefixPending:
 		return i.rdb.ArchivePendingTask(qname, id)
 	case keyPrefixScheduled:
-		return i.rdb.ArchiveScheduledTask(qname, id, score)
+		return i.rdb.ArchiveScheduledTask(qname, id)
 	case keyPrefixRetry:
-		return i.rdb.ArchiveRetryTask(qname, id, score)
+		return i.rdb.ArchiveRetryTask(qname, id)
 	case keyPrefixArchived:
 		return fmt.Errorf("task is already archived")
 	default:
@@ -518,8 +518,8 @@ func TestInspectorListPendingTasks(t *testing.T) {
 	defer r.Close()
 	m1 := h.NewTaskMessage("task1", nil)
 	m2 := h.NewTaskMessage("task2", nil)
-	m3 := h.NewTaskMessage("task3", nil)
-	m4 := h.NewTaskMessage("task4", nil)
+	m3 := h.NewTaskMessageWithQueue("task3", nil, "critical")
+	m4 := h.NewTaskMessageWithQueue("task4", nil, "low")
 
 	inspector := New(getRedisConnOpt(t))
 
@@ -587,8 +587,8 @@ func TestInspectorListActiveTasks(t *testing.T) {
 	defer r.Close()
 	m1 := h.NewTaskMessage("task1", nil)
 	m2 := h.NewTaskMessage("task2", nil)
-	m3 := h.NewTaskMessage("task3", nil)
-	m4 := h.NewTaskMessage("task4", nil)
+	m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
+	m4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
 
 	inspector := New(getRedisConnOpt(t))
 
@@ -1254,13 +1254,13 @@ func TestInspectorArchiveAllPendingTasks(t *testing.T) {
 		},
 		{
 			pending: map[string][]*base.TaskMessage{
-				"default": {m3, m4},
+				"default": {m3},
 			},
 			archived: map[string][]base.Z{
 				"default": {z1, z2},
 			},
 			qname: "default",
-			want:  2,
+			want:  1,
 			wantPending: map[string][]*base.TaskMessage{
 				"default": {},
 			},
@@ -1269,7 +1269,6 @@ func TestInspectorArchiveAllPendingTasks(t *testing.T) {
 				z1,
 				z2,
 				base.Z{Message: m3, Score: now.Unix()},
-				base.Z{Message: m4, Score: now.Unix()},
 			},
 		},
 	},
@@ -6,7 +6,6 @@
 package asynqtest
 
 import (
-	"encoding/json"
 	"math"
 	"sort"
 	"testing"
@@ -130,7 +129,7 @@ func TaskMessageWithError(t base.TaskMessage, errMsg string) *base.TaskMessage {
 // Calling test will fail if marshaling errors out.
 func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
 	tb.Helper()
-	data, err := json.Marshal(msg)
+	data, err := base.EncodeMessage(msg)
 	if err != nil {
 		tb.Fatal(err)
 	}
@@ -141,34 +140,11 @@ func MustMarshal(tb testing.TB, msg *base.TaskMessage) string {
 // Calling test will fail if unmarshaling errors out.
 func MustUnmarshal(tb testing.TB, data string) *base.TaskMessage {
 	tb.Helper()
-	var msg base.TaskMessage
-	err := json.Unmarshal([]byte(data), &msg)
+	msg, err := base.DecodeMessage([]byte(data))
 	if err != nil {
 		tb.Fatal(err)
 	}
-	return &msg
-}
-
-// MustMarshalSlice marshals a slice of task messages and return a slice of
-// json strings. Calling test will fail if marshaling errors out.
-func MustMarshalSlice(tb testing.TB, msgs []*base.TaskMessage) []string {
-	tb.Helper()
-	var data []string
-	for _, m := range msgs {
-		data = append(data, MustMarshal(tb, m))
-	}
-	return data
-}
-
-// MustUnmarshalSlice unmarshals a slice of strings into a slice of task message structs.
-// Calling test will fail if marshaling errors out.
-func MustUnmarshalSlice(tb testing.TB, data []string) []*base.TaskMessage {
-	tb.Helper()
-	var msgs []*base.TaskMessage
-	for _, s := range data {
-		msgs = append(msgs, MustUnmarshal(tb, s))
-	}
-	return msgs
+	return msg
 }
 
 // FlushDB deletes all the keys of the currently selected DB.
@@ -196,7 +172,7 @@ func FlushDB(tb testing.TB, r redis.UniversalClient) {
 func SeedPendingQueue(tb testing.TB, r redis.UniversalClient, msgs []*base.TaskMessage, qname string) {
 	tb.Helper()
 	r.SAdd(base.AllQueues, qname)
-	seedRedisList(tb, r, base.QueueKey(qname), msgs)
+	seedRedisList(tb, r, base.PendingKey(qname), msgs)
 }
 
 // SeedActiveQueue initializes the active queue with the given messages.
@@ -238,6 +214,7 @@ func SeedDeadlines(tb testing.TB, r redis.UniversalClient, entries []base.Z, qname string) {
 //
 // pending maps a queue name to a list of messages.
 func SeedAllPendingQueues(tb testing.TB, r redis.UniversalClient, pending map[string][]*base.TaskMessage) {
+	tb.Helper()
 	for q, msgs := range pending {
 		SeedPendingQueue(tb, r, msgs, q)
 	}
@@ -245,6 +222,7 @@ func SeedAllPendingQueues(tb testing.TB, r redis.UniversalClient, pending map[string][]*base.TaskMessage) {
 
 // SeedAllActiveQueues initializes all of the specified active queues with the given messages.
 func SeedAllActiveQueues(tb testing.TB, r redis.UniversalClient, active map[string][]*base.TaskMessage) {
+	tb.Helper()
 	for q, msgs := range active {
 		SeedActiveQueue(tb, r, msgs, q)
 	}
@@ -252,6 +230,7 @@ func SeedAllActiveQueues(tb testing.TB, r redis.UniversalClient, active map[string][]*base.TaskMessage) {
 
 // SeedAllScheduledQueues initializes all of the specified scheduled queues with the given entries.
 func SeedAllScheduledQueues(tb testing.TB, r redis.UniversalClient, scheduled map[string][]base.Z) {
+	tb.Helper()
 	for q, entries := range scheduled {
 		SeedScheduledQueue(tb, r, entries, q)
 	}
@@ -259,6 +238,7 @@ func SeedAllScheduledQueues(tb testing.TB, r redis.UniversalClient, scheduled map[string][]base.Z) {
 
 // SeedAllRetryQueues initializes all of the specified retry queues with the given entries.
 func SeedAllRetryQueues(tb testing.TB, r redis.UniversalClient, retry map[string][]base.Z) {
+	tb.Helper()
 	for q, entries := range retry {
 		SeedRetryQueue(tb, r, entries, q)
 	}
@@ -266,6 +246,7 @@ func SeedAllRetryQueues(tb testing.TB, r redis.UniversalClient, retry map[string][]base.Z) {
 
 // SeedAllArchivedQueues initializes all of the specified archived queues with the given entries.
 func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[string][]base.Z) {
+	tb.Helper()
 	for q, entries := range archived {
 		SeedArchivedQueue(tb, r, entries, q)
 	}
@@ -273,101 +254,138 @@ func SeedAllArchivedQueues(tb testing.TB, r redis.UniversalClient, archived map[string][]base.Z) {
 
 // SeedAllDeadlines initializes all of the deadlines with the given entries.
 func SeedAllDeadlines(tb testing.TB, r redis.UniversalClient, deadlines map[string][]base.Z) {
+	tb.Helper()
 	for q, entries := range deadlines {
 		SeedDeadlines(tb, r, entries, q)
 	}
 }
 
 func seedRedisList(tb testing.TB, c redis.UniversalClient, key string, msgs []*base.TaskMessage) {
-	data := MustMarshalSlice(tb, msgs)
-	for _, s := range data {
-		if err := c.LPush(key, s).Err(); err != nil {
+	tb.Helper()
+	for _, msg := range msgs {
+		encoded := MustMarshal(tb, msg)
+		if err := c.LPush(key, msg.ID.String()).Err(); err != nil {
+			tb.Fatal(err)
+		}
+		key := base.TaskKey(msg.Queue, msg.ID.String())
+		data := map[string]interface{}{
+			"msg":      encoded,
+			"timeout":  msg.Timeout,
+			"deadline": msg.Deadline,
+		}
+		if err := c.HSet(key, data).Err(); err != nil {
 			tb.Fatal(err)
 		}
 	}
 }
 
 func seedRedisZSet(tb testing.TB, c redis.UniversalClient, key string, items []base.Z) {
+	tb.Helper()
 	for _, item := range items {
-		z := &redis.Z{Member: MustMarshal(tb, item.Message), Score: float64(item.Score)}
+		msg := item.Message
+		encoded := MustMarshal(tb, msg)
+		z := &redis.Z{Member: msg.ID.String(), Score: float64(item.Score)}
 		if err := c.ZAdd(key, z).Err(); err != nil {
 			tb.Fatal(err)
 		}
+		key := base.TaskKey(msg.Queue, msg.ID.String())
+		data := map[string]interface{}{
+			"msg":      encoded,
+			"timeout":  msg.Timeout,
+			"deadline": msg.Deadline,
+		}
+		if err := c.HSet(key, data).Err(); err != nil {
+			tb.Fatal(err)
+		}
 	}
 }
 
 // GetPendingMessages returns all pending messages in the given queue.
 func GetPendingMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
 	tb.Helper()
-	return getListMessages(tb, r, base.QueueKey(qname))
+	return getMessagesFromList(tb, r, qname, base.PendingKey)
 }
 
 // GetActiveMessages returns all active messages in the given queue.
 func GetActiveMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
 	tb.Helper()
-	return getListMessages(tb, r, base.ActiveKey(qname))
+	return getMessagesFromList(tb, r, qname, base.ActiveKey)
 }
 
 // GetScheduledMessages returns all scheduled task messages in the given queue.
 func GetScheduledMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
 	tb.Helper()
-	return getZSetMessages(tb, r, base.ScheduledKey(qname))
+	return getMessagesFromZSet(tb, r, qname, base.ScheduledKey)
 }
 
 // GetRetryMessages returns all retry messages in the given queue.
 func GetRetryMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
 	tb.Helper()
-	return getZSetMessages(tb, r, base.RetryKey(qname))
+	return getMessagesFromZSet(tb, r, qname, base.RetryKey)
 }
 
 // GetArchivedMessages returns all archived messages in the given queue.
 func GetArchivedMessages(tb testing.TB, r redis.UniversalClient, qname string) []*base.TaskMessage {
 	tb.Helper()
-	return getZSetMessages(tb, r, base.ArchivedKey(qname))
+	return getMessagesFromZSet(tb, r, qname, base.ArchivedKey)
 }
 
 // GetScheduledEntries returns all scheduled messages and its score in the given queue.
 func GetScheduledEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
 	tb.Helper()
-	return getZSetEntries(tb, r, base.ScheduledKey(qname))
+	return getMessagesFromZSetWithScores(tb, r, qname, base.ScheduledKey)
 }
 
 // GetRetryEntries returns all retry messages and its score in the given queue.
 func GetRetryEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
 	tb.Helper()
-	return getZSetEntries(tb, r, base.RetryKey(qname))
+	return getMessagesFromZSetWithScores(tb, r, qname, base.RetryKey)
 }
 
 // GetArchivedEntries returns all archived messages and its score in the given queue.
 func GetArchivedEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
 	tb.Helper()
-	return getZSetEntries(tb, r, base.ArchivedKey(qname))
+	return getMessagesFromZSetWithScores(tb, r, qname, base.ArchivedKey)
 }
 
 // GetDeadlinesEntries returns all task messages and its score in the deadlines set for the given queue.
 func GetDeadlinesEntries(tb testing.TB, r redis.UniversalClient, qname string) []base.Z {
 	tb.Helper()
-	return getZSetEntries(tb, r, base.DeadlinesKey(qname))
+	return getMessagesFromZSetWithScores(tb, r, qname, base.DeadlinesKey)
 }
 
-func getListMessages(tb testing.TB, r redis.UniversalClient, list string) []*base.TaskMessage {
-	data := r.LRange(list, 0, -1).Val()
-	return MustUnmarshalSlice(tb, data)
-}
-
-func getZSetMessages(tb testing.TB, r redis.UniversalClient, zset string) []*base.TaskMessage {
-	data := r.ZRange(zset, 0, -1).Val()
-	return MustUnmarshalSlice(tb, data)
-}
-
-func getZSetEntries(tb testing.TB, r redis.UniversalClient, zset string) []base.Z {
-	data := r.ZRangeWithScores(zset, 0, -1).Val()
-	var entries []base.Z
-	for _, z := range data {
-		entries = append(entries, base.Z{
-			Message: MustUnmarshal(tb, z.Member.(string)),
-			Score:   int64(z.Score),
-		})
-	}
-	return entries
-}
+// Retrieves all messages stored under `keyFn(qname)` key in redis list.
+func getMessagesFromList(tb testing.TB, r redis.UniversalClient, qname string, keyFn func(qname string) string) []*base.TaskMessage {
+	tb.Helper()
+	ids := r.LRange(keyFn(qname), 0, -1).Val()
+	var msgs []*base.TaskMessage
+	for _, id := range ids {
+		data := r.HGet(base.TaskKey(qname, id), "msg").Val()
+		msgs = append(msgs, MustUnmarshal(tb, data))
+	}
+	return msgs
+}
+
+// Retrieves all messages stored under `keyFn(qname)` key in redis zset (sorted-set).
+func getMessagesFromZSet(tb testing.TB, r redis.UniversalClient, qname string, keyFn func(qname string) string) []*base.TaskMessage {
+	tb.Helper()
+	ids := r.ZRange(keyFn(qname), 0, -1).Val()
+	var msgs []*base.TaskMessage
+	for _, id := range ids {
+		msg := r.HGet(base.TaskKey(qname, id), "msg").Val()
+		msgs = append(msgs, MustUnmarshal(tb, msg))
+	}
+	return msgs
+}
+
+// Retrieves all messages along with their scores stored under `keyFn(qname)` key in redis zset (sorted-set).
+func getMessagesFromZSetWithScores(tb testing.TB, r redis.UniversalClient, qname string, keyFn func(qname string) string) []base.Z {
+	tb.Helper()
+	zs := r.ZRangeWithScores(keyFn(qname), 0, -1).Val()
+	var res []base.Z
+	for _, z := range zs {
+		msg := r.HGet(base.TaskKey(qname, z.Member.(string)), "msg").Val()
+		res = append(res, base.Z{Message: MustUnmarshal(tb, msg), Score: int64(z.Score)})
+	}
+	return res
+}
@@ -6,6 +6,7 @@
 package base
 
 import (
+	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
@@ -15,7 +16,10 @@ import (
 	"time"
 
 	"github.com/go-redis/redis/v7"
+	"github.com/golang/protobuf/ptypes"
 	"github.com/google/uuid"
+	pb "github.com/hibiken/asynq/internal/proto"
+	"google.golang.org/protobuf/proto"
 )
 
 // Version of asynq library and CLI.
@@ -25,7 +29,7 @@ const Version = "0.17.2"
 const DefaultQueueName = "default"
 
 // DefaultQueue is the redis key for the default queue.
-var DefaultQueue = QueueKey(DefaultQueueName)
+var DefaultQueue = PendingKey(DefaultQueueName)
 
 // Global Redis keys.
 const (
@@ -45,9 +49,19 @@ func ValidateQueueName(qname string) error {
 	return nil
 }
 
-// QueueKey returns a redis key for the given queue name.
-func QueueKey(qname string) string {
-	return fmt.Sprintf("asynq:{%s}", qname)
+// TaskKeyPrefix returns a prefix for task key.
+func TaskKeyPrefix(qname string) string {
+	return fmt.Sprintf("asynq:{%s}:t:", qname)
+}
+
+// TaskKey returns a redis key for the given task message.
+func TaskKey(qname, id string) string {
+	return fmt.Sprintf("%s%s", TaskKeyPrefix(qname), id)
+}
+
+// PendingKey returns a redis key for the given queue name.
+func PendingKey(qname string) string {
+	return fmt.Sprintf("asynq:{%s}:pending", qname)
 }
 
 // ActiveKey returns a redis key for the active tasks.
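For a queue named "default", the helpers above produce keys of the following shape. This is a small sketch that simply calls the new functions (it only compiles from inside the module, since internal/base is an internal package; the task ID is made up):

package main

import (
	"fmt"

	"github.com/hibiken/asynq/internal/base"
)

func main() {
	fmt.Println(base.PendingKey("default"))      // asynq:{default}:pending
	fmt.Println(base.TaskKey("default", "1234")) // asynq:{default}:t:1234
}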
@@ -184,24 +198,51 @@ type TaskMessage struct {
 	UniqueKey string
 }
 
-// EncodeMessage marshals the given task message in JSON and returns an encoded string.
-func EncodeMessage(msg *TaskMessage) (string, error) {
-	b, err := json.Marshal(msg)
-	if err != nil {
-		return "", err
-	}
-	return string(b), nil
-}
-
-// DecodeMessage unmarshals the given encoded string and returns a decoded task message.
-func DecodeMessage(s string) (*TaskMessage, error) {
-	d := json.NewDecoder(strings.NewReader(s))
-	d.UseNumber()
-	var msg TaskMessage
-	if err := d.Decode(&msg); err != nil {
-		return nil, err
-	}
-	return &msg, nil
+// EncodeMessage marshals the given task message and returns an encoded bytes.
+func EncodeMessage(msg *TaskMessage) ([]byte, error) {
+	if msg == nil {
+		return nil, fmt.Errorf("cannot encode nil message")
+	}
+	payload, err := json.Marshal(msg.Payload)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Marshal(&pb.TaskMessage{
+		Type:      msg.Type,
+		Payload:   payload,
+		Id:        msg.ID.String(),
+		Queue:     msg.Queue,
+		Retry:     int32(msg.Retry),
+		Retried:   int32(msg.Retried),
+		ErrorMsg:  msg.ErrorMsg,
+		Timeout:   msg.Timeout,
+		Deadline:  msg.Deadline,
+		UniqueKey: msg.UniqueKey,
+	})
+}
+
+// DecodeMessage unmarshals the given bytes and returns a decoded task message.
+func DecodeMessage(data []byte) (*TaskMessage, error) {
+	var pbmsg pb.TaskMessage
+	if err := proto.Unmarshal(data, &pbmsg); err != nil {
+		return nil, err
+	}
+	payload, err := decodePayload(pbmsg.GetPayload())
+	if err != nil {
+		return nil, err
+	}
+	return &TaskMessage{
+		Type:      pbmsg.GetType(),
+		Payload:   payload,
+		ID:        uuid.MustParse(pbmsg.GetId()),
+		Queue:     pbmsg.GetQueue(),
+		Retry:     int(pbmsg.GetRetry()),
+		Retried:   int(pbmsg.GetRetried()),
+		ErrorMsg:  pbmsg.GetErrorMsg(),
+		Timeout:   pbmsg.GetTimeout(),
+		Deadline:  pbmsg.GetDeadline(),
+		UniqueKey: pbmsg.GetUniqueKey(),
+	}, nil
 }
 
 // Z represents sorted set member.
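A round trip through the new protobuf codec could be checked with a test along these lines (a sketch, assuming it sits in the base package; the task type, payload, and other field values are invented, and only a few fields are compared back):

package base

import (
	"testing"

	"github.com/google/uuid"
)

func TestEncodeDecodeMessageRoundTrip(t *testing.T) {
	in := &TaskMessage{
		Type:    "email:send", // hypothetical task type
		Payload: map[string]interface{}{"user_id": 42},
		ID:      uuid.New(),
		Queue:   "default",
		Retry:   25,
		Timeout: 1800,
	}
	data, err := EncodeMessage(in)
	if err != nil {
		t.Fatal(err)
	}
	out, err := DecodeMessage(data)
	if err != nil {
		t.Fatal(err)
	}
	// Payload values come back as json.Number, so compare only the scalar fields here.
	if out.ID != in.ID || out.Type != in.Type || out.Queue != in.Queue || out.Timeout != in.Timeout {
		t.Errorf("round trip mismatch: got %+v, want %+v", out, in)
	}
}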
@@ -282,6 +323,59 @@ type ServerInfo struct {
 	ActiveWorkerCount int
 }
 
+// EncodeServerInfo marshals the given ServerInfo and returns the encoded bytes.
+func EncodeServerInfo(info *ServerInfo) ([]byte, error) {
+	if info == nil {
+		return nil, fmt.Errorf("cannot encode nil server info")
+	}
+	queues := make(map[string]int32)
+	for q, p := range info.Queues {
+		queues[q] = int32(p)
+	}
+	started, err := ptypes.TimestampProto(info.Started)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Marshal(&pb.ServerInfo{
+		Host:              info.Host,
+		Pid:               int32(info.PID),
+		ServerId:          info.ServerID,
+		Concurrency:       int32(info.Concurrency),
+		Queues:            queues,
+		StrictPriority:    info.StrictPriority,
+		Status:            info.Status,
+		StartTime:         started,
+		ActiveWorkerCount: int32(info.ActiveWorkerCount),
+	})
+}
+
+// DecodeServerInfo decodes the given bytes into ServerInfo.
+func DecodeServerInfo(b []byte) (*ServerInfo, error) {
+	var pbmsg pb.ServerInfo
+	if err := proto.Unmarshal(b, &pbmsg); err != nil {
+		return nil, err
+	}
+	queues := make(map[string]int)
+	for q, p := range pbmsg.GetQueues() {
+		queues[q] = int(p)
+	}
+	startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
+	if err != nil {
+		return nil, err
+	}
+	return &ServerInfo{
+		Host:              pbmsg.GetHost(),
+		PID:               int(pbmsg.GetPid()),
+		ServerID:          pbmsg.GetServerId(),
+		Concurrency:       int(pbmsg.GetConcurrency()),
+		Queues:            queues,
+		StrictPriority:    pbmsg.GetStrictPriority(),
+		Status:            pbmsg.GetStatus(),
+		Started:           startTime,
+		ActiveWorkerCount: int(pbmsg.GetActiveWorkerCount()),
+	}, nil
+}
+
 // WorkerInfo holds information about a running worker.
 type WorkerInfo struct {
 	Host string
@@ -289,12 +383,83 @@
 	ServerID string
 	ID       string
 	Type     string
-	Queue    string
 	Payload  map[string]interface{}
+	Queue    string
 	Started  time.Time
 	Deadline time.Time
 }
 
+// EncodeWorkerInfo marshals the given WorkerInfo and returns the encoded bytes.
+func EncodeWorkerInfo(info *WorkerInfo) ([]byte, error) {
+	if info == nil {
+		return nil, fmt.Errorf("cannot encode nil worker info")
+	}
+	payload, err := json.Marshal(info.Payload)
+	if err != nil {
+		return nil, err
+	}
+	startTime, err := ptypes.TimestampProto(info.Started)
+	if err != nil {
+		return nil, err
+	}
+	deadline, err := ptypes.TimestampProto(info.Deadline)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Marshal(&pb.WorkerInfo{
+		Host:        info.Host,
+		Pid:         int32(info.PID),
+		ServerId:    info.ServerID,
+		TaskId:      info.ID,
+		TaskType:    info.Type,
+		TaskPayload: payload,
+		Queue:       info.Queue,
+		StartTime:   startTime,
+		Deadline:    deadline,
+	})
+}
+
+func decodePayload(b []byte) (map[string]interface{}, error) {
+	d := json.NewDecoder(bytes.NewReader(b))
+	d.UseNumber()
+	payload := make(map[string]interface{})
+	if err := d.Decode(&payload); err != nil {
+		return nil, err
+	}
+	return payload, nil
+}
+
+// DecodeWorkerInfo decodes the given bytes into WorkerInfo.
+func DecodeWorkerInfo(b []byte) (*WorkerInfo, error) {
+	var pbmsg pb.WorkerInfo
+	if err := proto.Unmarshal(b, &pbmsg); err != nil {
+		return nil, err
+	}
+	payload, err := decodePayload(pbmsg.GetTaskPayload())
+	if err != nil {
+		return nil, err
+	}
+	startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
+	if err != nil {
+		return nil, err
+	}
+	deadline, err := ptypes.Timestamp(pbmsg.GetDeadline())
+	if err != nil {
+		return nil, err
+	}
+	return &WorkerInfo{
+		Host:     pbmsg.GetHost(),
+		PID:      int(pbmsg.GetPid()),
+		ServerID: pbmsg.GetServerId(),
+		ID:       pbmsg.GetTaskId(),
+		Type:     pbmsg.GetTaskType(),
+		Payload:  payload,
+		Queue:    pbmsg.GetQueue(),
+		Started:  startTime,
+		Deadline: deadline,
+	}, nil
+}
+
 // SchedulerEntry holds information about a periodic task registered with a scheduler.
 type SchedulerEntry struct {
 	// Identifier of this entry.
@@ -320,6 +485,63 @@
 	Prev time.Time
 }
 
+// EncodeSchedulerEntry marshals the given entry and returns an encoded bytes.
+func EncodeSchedulerEntry(entry *SchedulerEntry) ([]byte, error) {
+	if entry == nil {
+		return nil, fmt.Errorf("cannot encode nil scheduler entry")
+	}
+	payload, err := json.Marshal(entry.Payload)
+	if err != nil {
+		return nil, err
+	}
+	next, err := ptypes.TimestampProto(entry.Next)
+	if err != nil {
+		return nil, err
+	}
+	prev, err := ptypes.TimestampProto(entry.Prev)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Marshal(&pb.SchedulerEntry{
+		Id:              entry.ID,
+		Spec:            entry.Spec,
+		TaskType:        entry.Type,
+		TaskPayload:     payload,
+		EnqueueOptions:  entry.Opts,
+		NextEnqueueTime: next,
+		PrevEnqueueTime: prev,
+	})
+}
+
+// DecodeSchedulerEntry unmarshals the given bytes and returns a decoded SchedulerEntry.
+func DecodeSchedulerEntry(b []byte) (*SchedulerEntry, error) {
+	var pbmsg pb.SchedulerEntry
+	if err := proto.Unmarshal(b, &pbmsg); err != nil {
+		return nil, err
+	}
+	payload, err := decodePayload(pbmsg.GetTaskPayload())
+	if err != nil {
+		return nil, err
+	}
+	next, err := ptypes.Timestamp(pbmsg.GetNextEnqueueTime())
+	if err != nil {
+		return nil, err
+	}
+	prev, err := ptypes.Timestamp(pbmsg.GetPrevEnqueueTime())
+	if err != nil {
+		return nil, err
+	}
+	return &SchedulerEntry{
+		ID:      pbmsg.GetId(),
+		Spec:    pbmsg.GetSpec(),
+		Type:    pbmsg.GetTaskType(),
+		Payload: payload,
+		Opts:    pbmsg.GetEnqueueOptions(),
+		Next:    next,
+		Prev:    prev,
+	}, nil
+}
+
 // SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
 type SchedulerEnqueueEvent struct {
 	// ID of the task that was enqueued.
@ -329,6 +551,39 @@ type SchedulerEnqueueEvent struct {
|
|||||||
EnqueuedAt time.Time
|
EnqueuedAt time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EncodeSchedulerEnqueueEvent marshals the given event
|
||||||
|
// and returns the encoded bytes.
|
||||||
|
func EncodeSchedulerEnqueueEvent(event *SchedulerEnqueueEvent) ([]byte, error) {
|
||||||
|
if event == nil {
|
||||||
|
return nil, fmt.Errorf("cannot encode nil enqueue event")
|
||||||
|
}
|
||||||
|
enqueuedAt, err := ptypes.TimestampProto(event.EnqueuedAt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Marshal(&pb.SchedulerEnqueueEvent{
|
||||||
|
TaskId: event.TaskID,
|
||||||
|
EnqueueTime: enqueuedAt,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeSchedulerEnqueueEvent unmarshals the given bytes
|
||||||
|
// and returns a decoded SchedulerEnqueueEvent.
|
||||||
|
func DecodeSchedulerEnqueueEvent(b []byte) (*SchedulerEnqueueEvent, error) {
|
||||||
|
var pbmsg pb.SchedulerEnqueueEvent
|
||||||
|
if err := proto.Unmarshal(b, &pbmsg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
enqueuedAt, err := ptypes.Timestamp(pbmsg.GetEnqueueTime())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &SchedulerEnqueueEvent{
|
||||||
|
TaskID: pbmsg.GetTaskId(),
|
||||||
|
EnqueuedAt: enqueuedAt,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Cancelations is a collection that holds cancel functions for all active tasks.
|
// Cancelations is a collection that holds cancel functions for all active tasks.
|
||||||
//
|
//
|
||||||
// Cancelations are safe for concurrent use by multiple goroutines.
|
// Cancelations are safe for concurrent use by multiple goroutines.
|
||||||
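
As a rough illustration of the collection described above, the natural shape is a mutex-guarded map of context.CancelFunc keyed by task ID. This is a self-contained sketch with illustrative method names, not the package's exact API:

// Minimal sketch of a goroutine-safe cancelation registry.
package main

import (
	"context"
	"fmt"
	"sync"
)

type cancelations struct {
	mu          sync.Mutex
	cancelFuncs map[string]context.CancelFunc
}

func newCancelations() *cancelations {
	return &cancelations{cancelFuncs: make(map[string]context.CancelFunc)}
}

// Add registers a cancel function under the given task ID.
func (c *cancelations) Add(id string, fn context.CancelFunc) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.cancelFuncs[id] = fn
}

// Delete removes the cancel function registered under the given task ID.
func (c *cancelations) Delete(id string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.cancelFuncs, id)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	reg := newCancelations()
	reg.Add("task-123", cancel) // register while the task is active
	reg.Delete("task-123")      // unregister when the task finishes
	cancel()
	fmt.Println(ctx.Err()) // context canceled
}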
@ -380,7 +635,7 @@ type Broker interface {
|
|||||||
ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
|
ScheduleUnique(msg *TaskMessage, processAt time.Time, ttl time.Duration) error
|
||||||
Retry(msg *TaskMessage, processAt time.Time, errMsg string) error
|
Retry(msg *TaskMessage, processAt time.Time, errMsg string) error
|
||||||
Archive(msg *TaskMessage, errMsg string) error
|
Archive(msg *TaskMessage, errMsg string) error
|
||||||
CheckAndEnqueue(qnames ...string) error
|
ForwardIfReady(qnames ...string) error
|
||||||
ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
|
ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*TaskMessage, error)
|
||||||
WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
|
WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
|
||||||
ClearServerState(host string, pid int, serverID string) error
|
ClearServerState(host string, pid int, serverID string) error
|
||||||
|
@ -7,6 +7,7 @@ package base
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@ -15,17 +16,36 @@ import (
|
|||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestTaskKey(t *testing.T) {
|
||||||
|
id := uuid.NewString()
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
qname string
|
||||||
|
id string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"default", id, fmt.Sprintf("asynq:{default}:t:%s", id)},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
got := TaskKey(tc.qname, tc.id)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("TaskKey(%q, %s) = %q, want %q", tc.qname, tc.id, got, tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
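
Judging from the expected value in the test above, the helper under test presumably reduces to a single Sprintf over the queue name and task ID. This is a sketch inferred from the expectation, not the package's verbatim source:

// Hypothetical shape of TaskKey, inferred from the expected "asynq:{default}:t:<id>".
func TaskKey(qname, id string) string {
	return fmt.Sprintf("asynq:{%s}:t:%s", qname, id)
}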
|
||||||
|
|
||||||
func TestQueueKey(t *testing.T) {
|
func TestQueueKey(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
qname string
|
qname string
|
||||||
want string
|
want string
|
||||||
}{
|
}{
|
||||||
{"default", "asynq:{default}"},
|
{"default", "asynq:{default}:pending"},
|
||||||
{"custom", "asynq:{custom}"},
|
{"custom", "asynq:{custom}:pending"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
got := QueueKey(tc.qname)
|
got := PendingKey(tc.qname)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("QueueKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
t.Errorf("QueueKey(%q) = %q, want %q", tc.qname, got, tc.want)
|
||||||
}
|
}
|
||||||
@ -352,6 +372,145 @@ func TestMessageEncoding(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestServerInfoEncoding(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
info ServerInfo
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
info: ServerInfo{
|
||||||
|
Host: "127.0.0.1",
|
||||||
|
PID: 9876,
|
||||||
|
ServerID: "abc123",
|
||||||
|
Concurrency: 10,
|
||||||
|
Queues: map[string]int{"default": 1, "critical": 2},
|
||||||
|
StrictPriority: false,
|
||||||
|
Status: "running",
|
||||||
|
Started: time.Now().Add(-3 * time.Hour),
|
||||||
|
ActiveWorkerCount: 8,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
encoded, err := EncodeServerInfo(&tc.info)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("EncodeServerInfo(info) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
decoded, err := DecodeServerInfo(encoded)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("DecodeServerInfo(encoded) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(&tc.info, decoded); diff != "" {
|
||||||
|
t.Errorf("Decoded ServerInfo == %+v, want %+v;(-want,+got)\n%s",
|
||||||
|
decoded, tc.info, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWorkerInfoEncoding(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
info WorkerInfo
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
info: WorkerInfo{
|
||||||
|
Host: "127.0.0.1",
|
||||||
|
PID: 9876,
|
||||||
|
ServerID: "abc123",
|
||||||
|
ID: uuid.NewString(),
|
||||||
|
Type: "taskA",
|
||||||
|
Payload: map[string]interface{}{"foo": "bar"},
|
||||||
|
Queue: "default",
|
||||||
|
Started: time.Now().Add(-3 * time.Hour),
|
||||||
|
Deadline: time.Now().Add(30 * time.Second),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
encoded, err := EncodeWorkerInfo(&tc.info)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("EncodeWorkerInfo(info) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
decoded, err := DecodeWorkerInfo(encoded)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("DecodeWorkerInfo(encoded) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(&tc.info, decoded); diff != "" {
|
||||||
|
t.Errorf("Decoded WorkerInfo == %+v, want %+v;(-want,+got)\n%s",
|
||||||
|
decoded, tc.info, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSchedulerEntryEncoding(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
entry SchedulerEntry
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
entry: SchedulerEntry{
|
||||||
|
ID: uuid.NewString(),
|
||||||
|
Spec: "* * * * *",
|
||||||
|
Type: "task_A",
|
||||||
|
Payload: map[string]interface{}{"foo": "bar"},
|
||||||
|
Opts: []string{"Queue('email')"},
|
||||||
|
Next: time.Now().Add(30 * time.Second).UTC(),
|
||||||
|
Prev: time.Now().Add(-2 * time.Minute).UTC(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
encoded, err := EncodeSchedulerEntry(&tc.entry)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("EncodeSchedulerEntry(entry) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
decoded, err := DecodeSchedulerEntry(encoded)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("DecodeSchedulerEntry(encoded) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(&tc.entry, decoded); diff != "" {
|
||||||
|
t.Errorf("Decoded SchedulerEntry == %+v, want %+v;(-want,+got)\n%s",
|
||||||
|
decoded, tc.entry, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSchedulerEnqueueEventEncoding(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
event SchedulerEnqueueEvent
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
event: SchedulerEnqueueEvent{
|
||||||
|
TaskID: uuid.NewString(),
|
||||||
|
EnqueuedAt: time.Now().Add(-30 * time.Second).UTC(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
encoded, err := EncodeSchedulerEnqueueEvent(&tc.event)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("EncodeSchedulerEnqueueEvent(event) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
decoded, err := DecodeSchedulerEnqueueEvent(encoded)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("DecodeSchedulerEnqueueEvent(encoded) returned error: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(&tc.event, decoded); diff != "" {
|
||||||
|
t.Errorf("Decoded SchedulerEnqueueEvent == %+v, want %+v;(-want,+got)\n%s",
|
||||||
|
decoded, tc.event, diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Test for status being accessed by multiple goroutines.
|
// Test for status being accessed by multiple goroutines.
|
||||||
// Run with -race flag to check for data race.
|
// Run with -race flag to check for data race.
|
||||||
func TestStatusConcurrentAccess(t *testing.T) {
|
func TestStatusConcurrentAccess(t *testing.T) {
|
||||||
|
755
internal/proto/asynq.pb.go
Normal file
755
internal/proto/asynq.pb.go
Normal file
@ -0,0 +1,755 @@
|
|||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// protoc-gen-go v1.25.0
|
||||||
|
// protoc v3.14.0
|
||||||
|
// source: asynq.proto
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
reflect "reflect"
|
||||||
|
sync "sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Verify that this generated code is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||||
|
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||||
|
// of the legacy proto package is being used.
|
||||||
|
const _ = proto.ProtoPackageIsVersion4
|
||||||
|
|
||||||
|
type TaskMessage struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||||
|
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||||
|
Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"`
|
||||||
|
Queue string `protobuf:"bytes,4,opt,name=queue,proto3" json:"queue,omitempty"`
|
||||||
|
Retry int32 `protobuf:"varint,5,opt,name=retry,proto3" json:"retry,omitempty"`
|
||||||
|
Retried int32 `protobuf:"varint,6,opt,name=retried,proto3" json:"retried,omitempty"`
|
||||||
|
ErrorMsg string `protobuf:"bytes,7,opt,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"`
|
||||||
|
Timeout int64 `protobuf:"varint,8,opt,name=timeout,proto3" json:"timeout,omitempty"`
|
||||||
|
Deadline int64 `protobuf:"varint,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
|
||||||
|
UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) Reset() {
|
||||||
|
*x = TaskMessage{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[0]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*TaskMessage) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *TaskMessage) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[0]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use TaskMessage.ProtoReflect.Descriptor instead.
|
||||||
|
func (*TaskMessage) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetType() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Type
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetPayload() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.Payload
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Id
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetQueue() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Queue
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetRetry() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Retry
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetRetried() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Retried
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetErrorMsg() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.ErrorMsg
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetTimeout() int64 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Timeout
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetDeadline() int64 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Deadline
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *TaskMessage) GetUniqueKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.UniqueKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerInfo holds information about a running server.
|
||||||
|
type ServerInfo struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
|
||||||
|
Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||||
|
ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
|
||||||
|
Concurrency int32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
|
||||||
|
Queues map[string]int32 `protobuf:"bytes,5,rep,name=queues,proto3" json:"queues,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
|
||||||
|
StrictPriority bool `protobuf:"varint,6,opt,name=strict_priority,json=strictPriority,proto3" json:"strict_priority,omitempty"`
|
||||||
|
Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
|
||||||
|
StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
|
||||||
|
ActiveWorkerCount int32 `protobuf:"varint,9,opt,name=active_worker_count,json=activeWorkerCount,proto3" json:"active_worker_count,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) Reset() {
|
||||||
|
*x = ServerInfo{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[1]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*ServerInfo) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *ServerInfo) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[1]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use ServerInfo.ProtoReflect.Descriptor instead.
|
||||||
|
func (*ServerInfo) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetHost() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Host
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetPid() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Pid
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetServerId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.ServerId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetConcurrency() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Concurrency
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetQueues() map[string]int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Queues
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetStrictPriority() bool {
|
||||||
|
if x != nil {
|
||||||
|
return x.StrictPriority
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetStatus() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Status
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetStartTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.StartTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *ServerInfo) GetActiveWorkerCount() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.ActiveWorkerCount
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// WorkerInfo holds information about a running worker.
|
||||||
|
type WorkerInfo struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
|
||||||
|
Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||||
|
ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
|
||||||
|
TaskId string `protobuf:"bytes,4,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
|
||||||
|
TaskType string `protobuf:"bytes,5,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
|
||||||
|
TaskPayload []byte `protobuf:"bytes,6,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
|
||||||
|
Queue string `protobuf:"bytes,7,opt,name=queue,proto3" json:"queue,omitempty"`
|
||||||
|
StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
|
||||||
|
Deadline *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) Reset() {
|
||||||
|
*x = WorkerInfo{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[2]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*WorkerInfo) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[2]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use WorkerInfo.ProtoReflect.Descriptor instead.
|
||||||
|
func (*WorkerInfo) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{2}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetHost() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Host
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetPid() int32 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Pid
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetServerId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.ServerId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetTaskId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetTaskType() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskType
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetTaskPayload() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskPayload
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetQueue() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Queue
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetStartTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.StartTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *WorkerInfo) GetDeadline() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.Deadline
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedulerEntry holds information about a periodic task registered with a scheduler.
|
||||||
|
type SchedulerEntry struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// Identifier of the scheduler entry.
|
||||||
|
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||||
|
// Periodic schedule spec of the entry.
|
||||||
|
Spec string `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"`
|
||||||
|
// Task type of the periodic task.
|
||||||
|
TaskType string `protobuf:"bytes,3,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
|
||||||
|
// Task payload of the periodic task.
|
||||||
|
TaskPayload []byte `protobuf:"bytes,4,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
|
||||||
|
// Options used to enqueue the periodic task.
|
||||||
|
EnqueueOptions []string `protobuf:"bytes,5,rep,name=enqueue_options,json=enqueueOptions,proto3" json:"enqueue_options,omitempty"`
|
||||||
|
// Next time the task will be enqueued.
|
||||||
|
NextEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=next_enqueue_time,json=nextEnqueueTime,proto3" json:"next_enqueue_time,omitempty"`
|
||||||
|
// Last time the task was enqueued.
|
||||||
|
// Zero time if task was never enqueued.
|
||||||
|
PrevEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=prev_enqueue_time,json=prevEnqueueTime,proto3" json:"prev_enqueue_time,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) Reset() {
|
||||||
|
*x = SchedulerEntry{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[3]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*SchedulerEntry) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[3]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use SchedulerEntry.ProtoReflect.Descriptor instead.
|
||||||
|
func (*SchedulerEntry) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{3}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Id
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetSpec() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Spec
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetTaskType() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskType
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetTaskPayload() []byte {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskPayload
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetEnqueueOptions() []string {
|
||||||
|
if x != nil {
|
||||||
|
return x.EnqueueOptions
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetNextEnqueueTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.NextEnqueueTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEntry) GetPrevEnqueueTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.PrevEnqueueTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
|
||||||
|
type SchedulerEnqueueEvent struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// ID of the task that was enqueued.
|
||||||
|
TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
|
||||||
|
// Time the task was enqueued.
|
||||||
|
EnqueueTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=enqueue_time,json=enqueueTime,proto3" json:"enqueue_time,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) Reset() {
|
||||||
|
*x = SchedulerEnqueueEvent{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_asynq_proto_msgTypes[4]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*SchedulerEnqueueEvent) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_asynq_proto_msgTypes[4]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use SchedulerEnqueueEvent.ProtoReflect.Descriptor instead.
|
||||||
|
func (*SchedulerEnqueueEvent) Descriptor() ([]byte, []int) {
|
||||||
|
return file_asynq_proto_rawDescGZIP(), []int{4}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) GetTaskId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TaskId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SchedulerEnqueueEvent) GetEnqueueTime() *timestamppb.Timestamp {
|
||||||
|
if x != nil {
|
||||||
|
return x.EnqueueTime
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var File_asynq_proto protoreflect.FileDescriptor
|
||||||
|
|
||||||
|
var file_asynq_proto_rawDesc = []byte{
|
||||||
|
0x0a, 0x0b, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x74,
|
||||||
|
0x75, 0x74, 0x6f, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||||
|
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
|
||||||
|
0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x02, 0x0a, 0x0b, 0x54, 0x61, 0x73,
|
||||||
|
0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
|
||||||
|
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07,
|
||||||
|
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70,
|
||||||
|
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01,
|
||||||
|
0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18,
|
||||||
|
0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05,
|
||||||
|
0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x72, 0x65, 0x74,
|
||||||
|
0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x18, 0x06, 0x20,
|
||||||
|
0x01, 0x28, 0x05, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09,
|
||||||
|
0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||||
|
0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d,
|
||||||
|
0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65,
|
||||||
|
0x6f, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18,
|
||||||
|
0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12,
|
||||||
|
0x1d, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0a, 0x20,
|
||||||
|
0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x92,
|
||||||
|
0x03, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a,
|
||||||
|
0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73,
|
||||||
|
0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03,
|
||||||
|
0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64,
|
||||||
|
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64,
|
||||||
|
0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18,
|
||||||
|
0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
|
||||||
|
0x63, 0x79, 0x12, 0x38, 0x0a, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03,
|
||||||
|
0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x75, 0x74, 0x6f, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x53, 0x65,
|
||||||
|
0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x45,
|
||||||
|
0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f,
|
||||||
|
0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18,
|
||||||
|
0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x50, 0x72, 0x69,
|
||||||
|
0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
|
||||||
|
0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a,
|
||||||
|
0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
|
||||||
|
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||||
|
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73,
|
||||||
|
0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69,
|
||||||
|
0x76, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
|
||||||
|
0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x57, 0x6f, 0x72,
|
||||||
|
0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x75,
|
||||||
|
0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||||
|
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
|
||||||
|
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
|
||||||
|
0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e,
|
||||||
|
0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||||
|
0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20,
|
||||||
|
0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76,
|
||||||
|
0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72,
|
||||||
|
0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64,
|
||||||
|
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1b,
|
||||||
|
0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
|
||||||
|
0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74,
|
||||||
|
0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28,
|
||||||
|
0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x14,
|
||||||
|
0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71,
|
||||||
|
0x75, 0x65, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69,
|
||||||
|
0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||||
|
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
|
||||||
|
0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12,
|
||||||
|
0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
|
||||||
|
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||||
|
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64,
|
||||||
|
0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0e, 0x53, 0x63, 0x68, 0x65,
|
||||||
|
0x64, 0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
|
||||||
|
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x70,
|
||||||
|
0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x1b,
|
||||||
|
0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||||
|
0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74,
|
||||||
|
0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||||
|
0x0c, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x27,
|
||||||
|
0x0a, 0x0f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65,
|
||||||
|
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x11, 0x6e, 0x65, 0x78, 0x74, 0x5f,
|
||||||
|
0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01,
|
||||||
|
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||||
|
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f,
|
||||||
|
0x6e, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12,
|
||||||
|
0x46, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f,
|
||||||
|
0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||||
|
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||||
|
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x76, 0x45, 0x6e, 0x71, 0x75,
|
||||||
|
0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x6f, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65, 0x64,
|
||||||
|
0x75, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74,
|
||||||
|
0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||||
|
0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x6e, 0x71,
|
||||||
|
0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||||
|
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||||
|
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x65, 0x6e, 0x71,
|
||||||
|
0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68,
|
||||||
|
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x69, 0x62, 0x69, 0x6b, 0x65, 0x6e, 0x2f, 0x61,
|
||||||
|
0x73, 0x79, 0x6e, 0x71, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72,
|
||||||
|
0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
file_asynq_proto_rawDescOnce sync.Once
|
||||||
|
file_asynq_proto_rawDescData = file_asynq_proto_rawDesc
|
||||||
|
)
|
||||||
|
|
||||||
|
func file_asynq_proto_rawDescGZIP() []byte {
|
||||||
|
file_asynq_proto_rawDescOnce.Do(func() {
|
||||||
|
file_asynq_proto_rawDescData = protoimpl.X.CompressGZIP(file_asynq_proto_rawDescData)
|
||||||
|
})
|
||||||
|
return file_asynq_proto_rawDescData
|
||||||
|
}
|
||||||
|
|
||||||
|
var file_asynq_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||||
|
var file_asynq_proto_goTypes = []interface{}{
|
||||||
|
(*TaskMessage)(nil), // 0: tutorial.TaskMessage
|
||||||
|
(*ServerInfo)(nil), // 1: tutorial.ServerInfo
|
||||||
|
(*WorkerInfo)(nil), // 2: tutorial.WorkerInfo
|
||||||
|
(*SchedulerEntry)(nil), // 3: tutorial.SchedulerEntry
|
||||||
|
(*SchedulerEnqueueEvent)(nil), // 4: tutorial.SchedulerEnqueueEvent
|
||||||
|
nil, // 5: tutorial.ServerInfo.QueuesEntry
|
||||||
|
(*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
|
||||||
|
}
|
||||||
|
var file_asynq_proto_depIdxs = []int32{
|
||||||
|
5, // 0: tutorial.ServerInfo.queues:type_name -> tutorial.ServerInfo.QueuesEntry
|
||||||
|
6, // 1: tutorial.ServerInfo.start_time:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 2: tutorial.WorkerInfo.start_time:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 3: tutorial.WorkerInfo.deadline:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 4: tutorial.SchedulerEntry.next_enqueue_time:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 5: tutorial.SchedulerEntry.prev_enqueue_time:type_name -> google.protobuf.Timestamp
|
||||||
|
6, // 6: tutorial.SchedulerEnqueueEvent.enqueue_time:type_name -> google.protobuf.Timestamp
|
||||||
|
7, // [7:7] is the sub-list for method output_type
|
||||||
|
7, // [7:7] is the sub-list for method input_type
|
||||||
|
7, // [7:7] is the sub-list for extension type_name
|
||||||
|
7, // [7:7] is the sub-list for extension extendee
|
||||||
|
0, // [0:7] is the sub-list for field type_name
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { file_asynq_proto_init() }
|
||||||
|
func file_asynq_proto_init() {
|
||||||
|
if File_asynq_proto != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !protoimpl.UnsafeEnabled {
|
||||||
|
file_asynq_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*TaskMessage); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_asynq_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*ServerInfo); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_asynq_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*WorkerInfo); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_asynq_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*SchedulerEntry); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_asynq_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*SchedulerEnqueueEvent); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
type x struct{}
|
||||||
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
|
RawDescriptor: file_asynq_proto_rawDesc,
|
||||||
|
NumEnums: 0,
|
||||||
|
NumMessages: 6,
|
||||||
|
NumExtensions: 0,
|
||||||
|
NumServices: 0,
|
||||||
|
},
|
||||||
|
GoTypes: file_asynq_proto_goTypes,
|
||||||
|
DependencyIndexes: file_asynq_proto_depIdxs,
|
||||||
|
MessageInfos: file_asynq_proto_msgTypes,
|
||||||
|
}.Build()
|
||||||
|
File_asynq_proto = out.File
|
||||||
|
file_asynq_proto_rawDesc = nil
|
||||||
|
file_asynq_proto_goTypes = nil
|
||||||
|
file_asynq_proto_depIdxs = nil
|
||||||
|
}
|
148
internal/proto/asynq.proto
Normal file
148
internal/proto/asynq.proto
Normal file
@ -0,0 +1,148 @@
|
|||||||
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
package asynq;
|
||||||
|
|
||||||
|
import "google/protobuf/timestamp.proto";
|
||||||
|
|
||||||
|
option go_package = "github.com/hibiken/asynq/internal/proto";
|
||||||
|
|
||||||
|
// TaskMessage is the internal representation of a task with additional
|
||||||
|
// metadata fields.
|
||||||
|
message TaskMessage {
|
||||||
|
// Type indicates the kind of the task to be performed.
|
||||||
|
string type = 1;
|
||||||
|
|
||||||
|
// Payload holds data needed to process the task.
|
||||||
|
bytes payload = 2;
|
||||||
|
|
||||||
|
// Unique identifier for the task.
|
||||||
|
string id = 3;
|
||||||
|
|
||||||
|
// Name of the queue to which this task belongs.
|
||||||
|
string queue = 4;
|
||||||
|
|
||||||
|
// Max number of retries for this task.
|
||||||
|
int32 retry = 5;
|
||||||
|
|
||||||
|
// Number of times this task has been retried so far.
|
||||||
|
int32 retried = 6;
|
||||||
|
|
||||||
|
// Error message from the last failure.
|
||||||
|
string error_msg = 7;
|
||||||
|
|
||||||
|
// Timeout specifies timeout in seconds.
|
||||||
|
// Use zero to indicate no timeout.
|
||||||
|
int64 timeout = 8;
|
||||||
|
|
||||||
|
// Deadline specifies the deadline for the task in Unix time,
|
||||||
|
// the number of seconds elapsed since January 1, 1970 UTC.
|
||||||
|
// Use zero to indicate no deadline.
|
||||||
|
int64 deadline = 9;
|
||||||
|
|
||||||
|
// UniqueKey holds the redis key used for uniqueness lock for this task.
|
||||||
|
// Empty string indicates that no uniqueness lock was used.
|
||||||
|
string unique_key = 10;
|
||||||
|
};
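
To make the timeout and deadline conventions above concrete, here is a hedged Go sketch of populating those fields; pb refers to the generated package added in this commit, and the values are illustrative:

// Timeout is a duration expressed in whole seconds; Deadline is Unix seconds.
// Zero in either field means the corresponding limit is not set.
msg := &pb.TaskMessage{
	Type:     "email:send",
	Id:       "some-task-id",
	Queue:    "default",
	Timeout:  30,                                      // 30-second timeout
	Deadline: time.Now().Add(10 * time.Minute).Unix(), // absolute deadline
}
data, err := proto.Marshal(msg) // wire bytes, stored under the task hash's "msg" field
if err != nil {
	// handle the error
}
_ = data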
|
||||||
|
|
||||||
|
// ServerInfo holds information about a running server.
|
||||||
|
message ServerInfo {
|
||||||
|
// Host machine the server is running on.
|
||||||
|
string host = 1;
|
||||||
|
|
||||||
|
// PID of the server process.
|
||||||
|
int32 pid = 2;
|
||||||
|
|
||||||
|
// Unique identifier for this server.
|
||||||
|
string server_id = 3;
|
||||||
|
|
||||||
|
// Maximum number of tasks this server can process concurrently.
|
||||||
|
int32 concurrency = 4;
|
||||||
|
|
||||||
|
// List of queue names with their priorities.
|
||||||
|
// The server will consume tasks from the queues and prioritize
|
||||||
|
// queues with higher priority numbers.
|
||||||
|
map<string, int32> queues = 5;
|
||||||
|
|
||||||
|
// If set, the server will always consume tasks from a queue with higher
|
||||||
|
// priority.
|
||||||
|
bool strict_priority = 6;
|
||||||
|
|
||||||
|
// Status indicates the status of the server.
|
||||||
|
string status = 7;
|
||||||
|
|
||||||
|
// Time this server was started.
|
||||||
|
google.protobuf.Timestamp start_time = 8;
|
||||||
|
|
||||||
|
// Number of workers currently processing tasks.
|
||||||
|
int32 active_worker_count = 9;
|
||||||
|
};
|
||||||
|
|
||||||
|
// WorkerInfo holds information about a running worker.
|
||||||
|
message WorkerInfo {
|
||||||
|
// Host machine this worker is running on.
|
||||||
|
string host = 1;
|
||||||
|
|
||||||
|
// PID of the process in which this worker is running.
|
||||||
|
int32 pid = 2;
|
||||||
|
|
||||||
|
// ID of the server in which this worker is running.
|
||||||
|
string server_id = 3;
|
||||||
|
|
||||||
|
// ID of the task this worker is processing.
|
||||||
|
string task_id = 4;
|
||||||
|
|
||||||
|
// Type of the task this worker is processing.
|
||||||
|
string task_type = 5;
|
||||||
|
|
||||||
|
// Payload of the task this worker is processing.
|
||||||
|
bytes task_payload = 6;
|
||||||
|
|
||||||
|
// Name of the queue to which the task the worker is processing belongs.
|
||||||
|
string queue = 7;
|
||||||
|
|
||||||
|
// Time this worker started processing the task.
|
||||||
|
google.protobuf.Timestamp start_time = 8;
|
||||||
|
|
||||||
|
// Deadline by which the worker needs to complete processing
|
||||||
|
// the task. If the worker exceeds the deadline, the task will fail.
|
||||||
|
google.protobuf.Timestamp deadline = 9;
|
||||||
|
};
|
||||||
|
|
||||||
|
// SchedulerEntry holds information about a periodic task registered
|
||||||
|
// with a scheduler.
|
||||||
|
message SchedulerEntry {
|
||||||
|
// Identifier of the scheduler entry.
|
||||||
|
string id = 1;
|
||||||
|
|
||||||
|
// Periodic schedule spec of the entry.
|
||||||
|
string spec = 2;
|
||||||
|
|
||||||
|
// Task type of the periodic task.
|
||||||
|
string task_type = 3;
|
||||||
|
|
||||||
|
// Task payload of the periodic task.
|
||||||
|
bytes task_payload = 4;
|
||||||
|
|
||||||
|
// Options used to enqueue the periodic task.
|
||||||
|
repeated string enqueue_options = 5;
|
||||||
|
|
||||||
|
// Next time the task will be enqueued.
|
||||||
|
google.protobuf.Timestamp next_enqueue_time = 6;
|
||||||
|
|
||||||
|
// Last time the task was enqueued.
|
||||||
|
// Zero time if task was never enqueued.
|
||||||
|
google.protobuf.Timestamp prev_enqueue_time = 7;
|
||||||
|
};
|
||||||
|
|
||||||
|
// SchedulerEnqueueEvent holds information about an enqueue event
|
||||||
|
// by a scheduler.
|
||||||
|
message SchedulerEnqueueEvent {
|
||||||
|
// ID of the task that was enqueued.
|
||||||
|
string task_id = 1;
|
||||||
|
|
||||||
|
// Time the task was enqueued.
|
||||||
|
google.protobuf.Timestamp enqueue_time = 2;
|
||||||
|
};
|
@ -259,8 +259,8 @@ func BenchmarkCheckAndEnqueue(b *testing.B) {
|
|||||||
asynqtest.SeedScheduledQueue(b, r.client, zs, base.DefaultQueueName)
|
asynqtest.SeedScheduledQueue(b, r.client, zs, base.DefaultQueueName)
|
||||||
b.StartTimer()
|
b.StartTimer()
|
||||||
|
|
||||||
if err := r.CheckAndEnqueue(base.DefaultQueueName); err != nil {
|
if err := r.ForwardIfReady(base.DefaultQueueName); err != nil {
|
||||||
b.Fatalf("CheckAndEnqueue failed: %v", err)
|
b.Fatalf("ForwardIfReady failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -5,7 +5,6 @@
|
|||||||
package rdb
|
package rdb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@ -110,7 +109,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
|
|||||||
}
|
}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
res, err := currentStatsCmd.Run(r.client, []string{
|
res, err := currentStatsCmd.Run(r.client, []string{
|
||||||
base.QueueKey(qname),
|
base.PendingKey(qname),
|
||||||
base.ActiveKey(qname),
|
base.ActiveKey(qname),
|
||||||
base.ScheduledKey(qname),
|
base.ScheduledKey(qname),
|
||||||
base.RetryKey(qname),
|
base.RetryKey(qname),
|
||||||
@ -135,7 +134,7 @@ func (r *RDB) CurrentStats(qname string) (*Stats, error) {
|
|||||||
key := cast.ToString(data[i])
|
key := cast.ToString(data[i])
|
||||||
val := cast.ToInt(data[i+1])
|
val := cast.ToInt(data[i+1])
|
||||||
switch key {
|
switch key {
|
||||||
case base.QueueKey(qname):
|
case base.PendingKey(qname):
|
||||||
stats.Pending = val
|
stats.Pending = val
|
||||||
size += val
|
size += val
|
||||||
case base.ActiveKey(qname):
|
case base.ActiveKey(qname):
|
||||||
@ -312,7 +311,7 @@ func (r *RDB) ListPending(qname string, pgn Pagination) ([]*base.TaskMessage, er
|
|||||||
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
||||||
return nil, fmt.Errorf("queue %q does not exist", qname)
|
return nil, fmt.Errorf("queue %q does not exist", qname)
|
||||||
}
|
}
|
||||||
return r.listMessages(base.QueueKey(qname), pgn)
|
return r.listMessages(base.PendingKey(qname), qname, pgn)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListActive returns all tasks that are currently being processed for the given queue.
|
// ListActive returns all tasks that are currently being processed for the given queue.
|
||||||
@ -320,23 +319,42 @@ func (r *RDB) ListActive(qname string, pgn Pagination) ([]*base.TaskMessage, err
|
|||||||
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
||||||
return nil, fmt.Errorf("queue %q does not exist", qname)
|
return nil, fmt.Errorf("queue %q does not exist", qname)
|
||||||
}
|
}
|
||||||
return r.listMessages(base.ActiveKey(qname), pgn)
|
return r.listMessages(base.ActiveKey(qname), qname, pgn)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KEYS[1] -> key for id list (e.g. asynq:{<qname>}:pending)
|
||||||
|
// ARGV[1] -> start offset
|
||||||
|
// ARGV[2] -> stop offset
|
||||||
|
// ARGV[3] -> task key prefix
|
||||||
|
var listMessagesCmd = redis.NewScript(`
|
||||||
|
local ids = redis.call("LRange", KEYS[1], ARGV[1], ARGV[2])
|
||||||
|
local res = {}
|
||||||
|
for _, id in ipairs(ids) do
|
||||||
|
local key = ARGV[3] .. id
|
||||||
|
table.insert(res, redis.call("HGET", key, "msg"))
|
||||||
|
end
|
||||||
|
return res
|
||||||
|
`)
|
||||||
|
|
||||||
// listMessages returns a list of TaskMessages from the Redis list with the given key.
|
// listMessages returns a list of TaskMessages from the Redis list with the given key.
|
||||||
func (r *RDB) listMessages(key string, pgn Pagination) ([]*base.TaskMessage, error) {
|
func (r *RDB) listMessages(key, qname string, pgn Pagination) ([]*base.TaskMessage, error) {
|
||||||
// Note: Because we use LPUSH to redis list, we need to calculate the
|
// Note: Because we use LPUSH to redis list, we need to calculate the
|
||||||
// correct range and reverse the list to get the tasks with pagination.
|
// correct range and reverse the list to get the tasks with pagination.
|
||||||
stop := -pgn.start() - 1
|
stop := -pgn.start() - 1
|
||||||
start := -pgn.stop() - 1
|
start := -pgn.stop() - 1
|
||||||
data, err := r.client.LRange(key, start, stop).Result()
|
res, err := listMessagesCmd.Run(r.client,
|
||||||
|
[]string{key}, start, stop, base.TaskKeyPrefix(qname)).Result()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
data, err := cast.ToStringSliceE(res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
reverse(data)
|
reverse(data)
|
||||||
var msgs []*base.TaskMessage
|
var msgs []*base.TaskMessage
|
||||||
for _, s := range data {
|
for _, s := range data {
|
||||||
m, err := base.DecodeMessage(s)
|
m, err := base.DecodeMessage([]byte(s))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue // bad data, ignore and continue
|
continue // bad data, ignore and continue
|
||||||
}
|
}
|
||||||
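
For readers less familiar with Lua scripting in Redis, the listMessagesCmd script together with the rewritten listMessages above is equivalent to the following client-side sequence (a sketch using the go-redis style calls already used in this file; the Lua version simply collapses it into a single round trip):

// Equivalent client-side reads: the LIST now holds task IDs, and each message
// lives in a per-task HASH under the "msg" field.
ids, err := r.client.LRange(base.PendingKey(qname), start, stop).Result()
if err != nil {
	return nil, err
}
var msgs []*base.TaskMessage
for _, id := range ids {
	s, err := r.client.HGet(base.TaskKeyPrefix(qname)+id, "msg").Result()
	if err != nil {
		continue // treat a missing hash as bad data, mirroring the decode loop above
	}
	m, err := base.DecodeMessage([]byte(s))
	if err != nil {
		continue
	}
	msgs = append(msgs, m)
}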
@ -352,7 +370,7 @@ func (r *RDB) ListScheduled(qname string, pgn Pagination) ([]base.Z, error) {
|
|||||||
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
||||||
return nil, fmt.Errorf("queue %q does not exist", qname)
|
return nil, fmt.Errorf("queue %q does not exist", qname)
|
||||||
}
|
}
|
||||||
return r.listZSetEntries(base.ScheduledKey(qname), pgn)
|
return r.listZSetEntries(base.ScheduledKey(qname), qname, pgn)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListRetry returns all tasks from the given queue that have failed before
|
// ListRetry returns all tasks from the given queue that have failed before
|
||||||
@ -361,7 +379,7 @@ func (r *RDB) ListRetry(qname string, pgn Pagination) ([]base.Z, error) {
|
|||||||
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
||||||
return nil, fmt.Errorf("queue %q does not exist", qname)
|
return nil, fmt.Errorf("queue %q does not exist", qname)
|
||||||
}
|
}
|
||||||
return r.listZSetEntries(base.RetryKey(qname), pgn)
|
return r.listZSetEntries(base.RetryKey(qname), qname, pgn)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListArchived returns all tasks from the given queue that have exhausted their retry limit.
|
// ListArchived returns all tasks from the given queue that have exhausted their retry limit.
|
||||||
@ -369,36 +387,63 @@ func (r *RDB) ListArchived(qname string, pgn Pagination) ([]base.Z, error) {
|
|||||||
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
if !r.client.SIsMember(base.AllQueues, qname).Val() {
|
||||||
return nil, fmt.Errorf("queue %q does not exist", qname)
|
return nil, fmt.Errorf("queue %q does not exist", qname)
|
||||||
}
|
}
|
||||||
return r.listZSetEntries(base.ArchivedKey(qname), pgn)
|
return r.listZSetEntries(base.ArchivedKey(qname), qname, pgn)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KEYS[1] -> key for ids set (e.g. asynq:{<qname>}:scheduled)
|
||||||
|
// ARGV[1] -> min
|
||||||
|
// ARGV[2] -> max
|
||||||
|
// ARGV[3] -> task key prefix
|
||||||
|
//
|
||||||
|
// Returns an array populated with
|
||||||
|
// [msg1, score1, msg2, score2, ..., msgN, scoreN]
|
||||||
|
var listZSetEntriesCmd = redis.NewScript(`
|
||||||
|
local res = {}
|
||||||
|
local id_score_pairs = redis.call("ZRANGE", KEYS[1], ARGV[1], ARGV[2], "WITHSCORES")
|
||||||
|
for i = 1, table.getn(id_score_pairs), 2 do
|
||||||
|
local key = ARGV[3] .. id_score_pairs[i]
|
||||||
|
table.insert(res, redis.call("HGET", key, "msg"))
|
||||||
|
table.insert(res, id_score_pairs[i+1])
|
||||||
|
end
|
||||||
|
return res
|
||||||
|
`)
|
||||||
|
|
||||||
// listZSetEntries returns a list of message and score pairs in the Redis sorted set
|
// listZSetEntries returns a list of message and score pairs in the Redis sorted set
|
||||||
// with the given key.
|
// with the given key.
|
||||||
func (r *RDB) listZSetEntries(key string, pgn Pagination) ([]base.Z, error) {
|
func (r *RDB) listZSetEntries(key, qname string, pgn Pagination) ([]base.Z, error) {
|
||||||
data, err := r.client.ZRangeWithScores(key, pgn.start(), pgn.stop()).Result()
|
res, err := listZSetEntriesCmd.Run(r.client, []string{key},
|
||||||
|
pgn.start(), pgn.stop(), base.TaskKeyPrefix(qname)).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var res []base.Z
|
data, err := cast.ToSliceE(res)
|
||||||
for _, z := range data {
|
if err != nil {
|
||||||
s, ok := z.Member.(string)
|
return nil, err
|
||||||
if !ok {
|
|
||||||
continue // bad data, ignore and continue
|
|
||||||
}
|
}
|
||||||
msg, err := base.DecodeMessage(s)
|
var zs []base.Z
|
||||||
|
for i := 0; i < len(data); i += 2 {
|
||||||
|
s, err := cast.ToStringE(data[i])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
score, err := cast.ToInt64E(data[i+1])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
msg, err := base.DecodeMessage([]byte(s))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue // bad data, ignore and continue
|
continue // bad data, ignore and continue
|
||||||
}
|
}
|
||||||
res = append(res, base.Z{Message: msg, Score: int64(z.Score)})
|
zs = append(zs, base.Z{Message: msg, Score: score})
|
||||||
}
|
}
|
||||||
return res, nil
|
return zs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
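For reference, listZSetEntriesCmd hands back a flat array, and Lua returns sorted-set scores as strings, which is why listZSetEntries above converts them with cast.ToInt64E. An illustrative reply for two entries (the payloads being protobuf-encoded task messages):

	[]interface{}{"<encoded msg 1>", "1622700000", "<encoded msg 2>", "1622700100"}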
// RunArchivedTask finds an archived task that matches the given id and score from
|
// RunArchivedTask finds an archived task that matches the given id and score from
|
||||||
// the given queue and enqueues it for processing.
|
// the given queue and enqueues it for processing.
|
||||||
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
||||||
func (r *RDB) RunArchivedTask(qname string, id uuid.UUID, score int64) error {
|
func (r *RDB) RunArchivedTask(qname string, id uuid.UUID) error {
|
||||||
n, err := r.removeAndRun(base.ArchivedKey(qname), base.QueueKey(qname), id.String(), float64(score))
|
n, err := r.removeAndRun(base.ArchivedKey(qname), base.PendingKey(qname), id.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -411,8 +456,8 @@ func (r *RDB) RunArchivedTask(qname string, id uuid.UUID, score int64) error {
|
|||||||
// RunRetryTask finds a retry task that matches the given id and score from
|
// RunRetryTask finds a retry task that matches the given id and score from
|
||||||
// the given queue and enqueues it for processing.
|
// the given queue and enqueues it for processing.
|
||||||
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
||||||
func (r *RDB) RunRetryTask(qname string, id uuid.UUID, score int64) error {
|
func (r *RDB) RunRetryTask(qname string, id uuid.UUID) error {
|
||||||
n, err := r.removeAndRun(base.RetryKey(qname), base.QueueKey(qname), id.String(), float64(score))
|
n, err := r.removeAndRun(base.RetryKey(qname), base.PendingKey(qname), id.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -425,8 +470,8 @@ func (r *RDB) RunRetryTask(qname string, id uuid.UUID, score int64) error {
|
|||||||
// RunScheduledTask finds a scheduled task that matches the given id and score from
|
// RunScheduledTask finds a scheduled task that matches the given id and score from
|
||||||
// the given queue and enqueues it for processing.
|
// the given queue and enqueues it for processing.
|
||||||
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
||||||
func (r *RDB) RunScheduledTask(qname string, id uuid.UUID, score int64) error {
|
func (r *RDB) RunScheduledTask(qname string, id uuid.UUID) error {
|
||||||
n, err := r.removeAndRun(base.ScheduledKey(qname), base.QueueKey(qname), id.String(), float64(score))
|
n, err := r.removeAndRun(base.ScheduledKey(qname), base.PendingKey(qname), id.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -439,35 +484,35 @@ func (r *RDB) RunScheduledTask(qname string, id uuid.UUID, score int64) error {
|
|||||||
// RunAllScheduledTasks enqueues all scheduled tasks from the given queue
|
// RunAllScheduledTasks enqueues all scheduled tasks from the given queue
|
||||||
// and returns the number of tasks enqueued.
|
// and returns the number of tasks enqueued.
|
||||||
func (r *RDB) RunAllScheduledTasks(qname string) (int64, error) {
|
func (r *RDB) RunAllScheduledTasks(qname string) (int64, error) {
|
||||||
return r.removeAndRunAll(base.ScheduledKey(qname), base.QueueKey(qname))
|
return r.removeAndRunAll(base.ScheduledKey(qname), base.PendingKey(qname))
|
||||||
}
|
}
|
||||||
|
|
||||||
// RunAllRetryTasks enqueues all retry tasks from the given queue
|
// RunAllRetryTasks enqueues all retry tasks from the given queue
|
||||||
// and returns the number of tasks enqueued.
|
// and returns the number of tasks enqueued.
|
||||||
func (r *RDB) RunAllRetryTasks(qname string) (int64, error) {
|
func (r *RDB) RunAllRetryTasks(qname string) (int64, error) {
|
||||||
return r.removeAndRunAll(base.RetryKey(qname), base.QueueKey(qname))
|
return r.removeAndRunAll(base.RetryKey(qname), base.PendingKey(qname))
|
||||||
}
|
}
|
||||||
|
|
||||||
// RunAllArchivedTasks enqueues all archived tasks from the given queue
|
// RunAllArchivedTasks enqueues all archived tasks from the given queue
|
||||||
// and returns the number of tasks enqueued.
|
// and returns the number of tasks enqueued.
|
||||||
func (r *RDB) RunAllArchivedTasks(qname string) (int64, error) {
|
func (r *RDB) RunAllArchivedTasks(qname string) (int64, error) {
|
||||||
return r.removeAndRunAll(base.ArchivedKey(qname), base.QueueKey(qname))
|
return r.removeAndRunAll(base.ArchivedKey(qname), base.PendingKey(qname))
|
||||||
}
|
}
|
||||||
|
|
||||||
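Because the ZSET members are now task IDs, callers no longer pass the task's score. A hypothetical usage sketch of the new signatures (queue names and id are placeholders):

func requeueExamples(r *RDB, id uuid.UUID) error {
	if err := r.RunScheduledTask("default", id); err != nil {
		return err // ErrTaskNotFound when the id is not in the scheduled set
	}
	_, err := r.RunAllRetryTasks("critical")
	return err
}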
|
// KEYS[1] -> sorted set to remove the id from
|
||||||
|
// KEYS[2] -> asynq:{<qname>}:pending
|
||||||
|
// ARGV[1] -> task ID
|
||||||
var removeAndRunCmd = redis.NewScript(`
|
var removeAndRunCmd = redis.NewScript(`
|
||||||
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], ARGV[1], ARGV[1])
|
local n = redis.call("ZREM", KEYS[1], ARGV[1])
|
||||||
for _, msg in ipairs(msgs) do
|
if n == 0 then
|
||||||
local decoded = cjson.decode(msg)
|
return 0
|
||||||
if decoded["ID"] == ARGV[2] then
|
|
||||||
redis.call("LPUSH", KEYS[2], msg)
|
|
||||||
redis.call("ZREM", KEYS[1], msg)
|
|
||||||
return 1
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
return 0`)
|
redis.call("LPUSH", KEYS[2], ARGV[1])
|
||||||
|
return 1
|
||||||
|
`)
|
||||||
|
|
||||||
func (r *RDB) removeAndRun(zset, qkey, id string, score float64) (int64, error) {
|
func (r *RDB) removeAndRun(zset, qkey, id string) (int64, error) {
|
||||||
res, err := removeAndRunCmd.Run(r.client, []string{zset, qkey}, score, id).Result()
|
res, err := removeAndRunCmd.Run(r.client, []string{zset, qkey}, id).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@ -479,12 +524,12 @@ func (r *RDB) removeAndRun(zset, qkey, id string, score float64) (int64, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
var removeAndRunAllCmd = redis.NewScript(`
|
var removeAndRunAllCmd = redis.NewScript(`
|
||||||
local msgs = redis.call("ZRANGE", KEYS[1], 0, -1)
|
local ids = redis.call("ZRANGE", KEYS[1], 0, -1)
|
||||||
for _, msg in ipairs(msgs) do
|
for _, id in ipairs(ids) do
|
||||||
redis.call("LPUSH", KEYS[2], msg)
|
redis.call("LPUSH", KEYS[2], id)
|
||||||
redis.call("ZREM", KEYS[1], msg)
|
redis.call("ZREM", KEYS[1], id)
|
||||||
end
|
end
|
||||||
return table.getn(msgs)`)
|
return table.getn(ids)`)
|
||||||
|
|
||||||
func (r *RDB) removeAndRunAll(zset, qkey string) (int64, error) {
|
func (r *RDB) removeAndRunAll(zset, qkey string) (int64, error) {
|
||||||
res, err := removeAndRunAllCmd.Run(r.client, []string{zset, qkey}).Result()
|
res, err := removeAndRunAllCmd.Run(r.client, []string{zset, qkey}).Result()
|
||||||
@ -498,10 +543,11 @@ func (r *RDB) removeAndRunAll(zset, qkey string) (int64, error) {
|
|||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ArchiveRetryTask finds a retry task that matches the given id and score from the given queue
|
// ArchiveRetryTask finds a retry task that matches the given id
|
||||||
// and archives it. If a task that maches the id and score does not exist, it returns ErrTaskNotFound.
|
// from the given queue and archives it.
|
||||||
func (r *RDB) ArchiveRetryTask(qname string, id uuid.UUID, score int64) error {
|
// If there's no match, it returns ErrTaskNotFound.
|
||||||
n, err := r.removeAndArchive(base.RetryKey(qname), base.ArchivedKey(qname), id.String(), float64(score))
|
func (r *RDB) ArchiveRetryTask(qname string, id uuid.UUID) error {
|
||||||
|
n, err := r.removeAndArchive(base.RetryKey(qname), base.ArchivedKey(qname), id.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -511,10 +557,11 @@ func (r *RDB) ArchiveRetryTask(qname string, id uuid.UUID, score int64) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ArchiveScheduledTask finds a scheduled task that matches the given id and score from the given queue
|
// ArchiveScheduledTask finds a scheduled task that matches the given id
|
||||||
// and archives it. If a task that maches the id and score does not exist, it returns ErrTaskNotFound.
|
// from the given queue and archives it.
|
||||||
func (r *RDB) ArchiveScheduledTask(qname string, id uuid.UUID, score int64) error {
|
// If there's no match, it returns ErrTaskNotFound.
|
||||||
n, err := r.removeAndArchive(base.ScheduledKey(qname), base.ArchivedKey(qname), id.String(), float64(score))
|
func (r *RDB) ArchiveScheduledTask(qname string, id uuid.UUID) error {
|
||||||
|
n, err := r.removeAndArchive(base.ScheduledKey(qname), base.ArchivedKey(qname), id.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -526,13 +573,12 @@ func (r *RDB) ArchiveScheduledTask(qname string, id uuid.UUID, score int64) erro
|
|||||||
|
|
||||||
// KEYS[1] -> asynq:{<qname>}
|
// KEYS[1] -> asynq:{<qname>}
|
||||||
// KEYS[2] -> asynq:{<qname>}:archived
|
// KEYS[2] -> asynq:{<qname>}:archived
|
||||||
// ARGV[1] -> task message to archive
|
// ARGV[1] -> ID of the task to archive
|
||||||
// ARGV[2] -> current timestamp
|
// ARGV[2] -> current timestamp
|
||||||
// ARGV[3] -> cutoff timestamp (e.g., 90 days ago)
|
// ARGV[3] -> cutoff timestamp (e.g., 90 days ago)
|
||||||
// ARGV[4] -> max number of tasks in archive (e.g., 100)
|
// ARGV[4] -> max number of tasks in archive (e.g., 100)
|
||||||
var archivePendingCmd = redis.NewScript(`
|
var archivePendingCmd = redis.NewScript(`
|
||||||
local x = redis.call("LREM", KEYS[1], 1, ARGV[1])
|
if redis.call("LREM", KEYS[1], 1, ARGV[1]) == 0 then
|
||||||
if x == 0 then
|
|
||||||
return 0
|
return 0
|
||||||
end
|
end
|
||||||
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[1])
|
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[1])
|
||||||
@ -541,47 +587,33 @@ redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[4])
|
|||||||
return 1
|
return 1
|
||||||
`)
|
`)
|
||||||
|
|
||||||
func (r *RDB) archivePending(qname, msg string) (int64, error) {
|
// ArchivePendingTask finds a pending task that matches the given id
|
||||||
keys := []string{base.QueueKey(qname), base.ArchivedKey(qname)}
|
// from the given queue and archives it.
|
||||||
|
// If there's no match, it returns ErrTaskNotFound.
|
||||||
|
func (r *RDB) ArchivePendingTask(qname string, id uuid.UUID) error {
|
||||||
|
keys := []string{
|
||||||
|
base.PendingKey(qname),
|
||||||
|
base.ArchivedKey(qname),
|
||||||
|
}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
|
argv := []interface{}{
|
||||||
args := []interface{}{msg, now.Unix(), limit, maxArchiveSize}
|
id.String(),
|
||||||
res, err := archivePendingCmd.Run(r.client, keys, args...).Result()
|
now.Unix(),
|
||||||
|
now.AddDate(0, 0, -archivedExpirationInDays).Unix(),
|
||||||
|
maxArchiveSize,
|
||||||
|
}
|
||||||
|
res, err := archivePendingCmd.Run(r.client, keys, argv...).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return err
|
||||||
}
|
}
|
||||||
n, ok := res.(int64)
|
n, ok := res.(int64)
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, fmt.Errorf("could not cast %v to int64", res)
|
return fmt.Errorf("command error: unexpected return value %v", res)
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArchivePendingTask finds a pending task that matches the given id from the given queue
|
|
||||||
// and archives it. If a task that maches the id does not exist, it returns ErrTaskNotFound.
|
|
||||||
func (r *RDB) ArchivePendingTask(qname string, id uuid.UUID) error {
|
|
||||||
qkey := base.QueueKey(qname)
|
|
||||||
data, err := r.client.LRange(qkey, 0, -1).Result()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, s := range data {
|
|
||||||
msg, err := base.DecodeMessage(s)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if msg.ID == id {
|
|
||||||
n, err := r.archivePending(qname, s)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
return ErrTaskNotFound
|
return ErrTaskNotFound
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
|
||||||
}
|
|
||||||
return ErrTaskNotFound
|
|
||||||
}
|
}
|
||||||
|
|
||||||
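Archiving a pending task now moves only its ID (LREM from the pending list, ZADD to the archived set) while the payload stays in the task hash. An illustrative check only, not part of this commit:

func (r *RDB) archivedAt(qname string, id uuid.UUID) (time.Time, error) {
	// The archived ZSET scores each task ID by its archival time.
	score, err := r.client.ZScore(base.ArchivedKey(qname), id.String()).Result()
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(int64(score), 0), nil
}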
// ArchiveAllRetryTasks archives all retry tasks from the given queue and
|
// ArchiveAllRetryTasks archives all retry tasks from the given queue and
|
||||||
@ -596,66 +628,64 @@ func (r *RDB) ArchiveAllScheduledTasks(qname string) (int64, error) {
|
|||||||
return r.removeAndArchiveAll(base.ScheduledKey(qname), base.ArchivedKey(qname))
|
return r.removeAndArchiveAll(base.ScheduledKey(qname), base.ArchivedKey(qname))
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> asynq:{<qname>}
|
// KEYS[1] -> asynq:{<qname>}:pending
|
||||||
// KEYS[2] -> asynq:{<qname>}:archived
|
// KEYS[2] -> asynq:{<qname>}:archived
|
||||||
// ARGV[1] -> current timestamp
|
// ARGV[1] -> current timestamp
|
||||||
// ARGV[2] -> cutoff timestamp (e.g., 90 days ago)
|
// ARGV[2] -> cutoff timestamp (e.g., 90 days ago)
|
||||||
// ARGV[3] -> max number of tasks in archive (e.g., 100)
|
// ARGV[3] -> max number of tasks in archive (e.g., 100)
|
||||||
var archiveAllPendingCmd = redis.NewScript(`
|
var archiveAllPendingCmd = redis.NewScript(`
|
||||||
local msgs = redis.call("LRANGE", KEYS[1], 0, -1)
|
local ids = redis.call("LRANGE", KEYS[1], 0, -1)
|
||||||
for _, msg in ipairs(msgs) do
|
for _, id in ipairs(ids) do
|
||||||
redis.call("ZADD", KEYS[2], ARGV[1], msg)
|
redis.call("ZADD", KEYS[2], ARGV[1], id)
|
||||||
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[2])
|
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[2])
|
||||||
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[3])
|
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[3])
|
||||||
end
|
end
|
||||||
redis.call("DEL", KEYS[1])
|
redis.call("DEL", KEYS[1])
|
||||||
return table.getn(msgs)`)
|
return table.getn(ids)`)
|
||||||
|
|
||||||
// ArchiveAllPendingTasks archives all pending tasks from the given queue and
|
// ArchiveAllPendingTasks archives all pending tasks from the given queue and
|
||||||
// returns the number of tasks that were moved.
|
// returns the number of tasks moved.
|
||||||
func (r *RDB) ArchiveAllPendingTasks(qname string) (int64, error) {
|
func (r *RDB) ArchiveAllPendingTasks(qname string) (int64, error) {
|
||||||
keys := []string{base.QueueKey(qname), base.ArchivedKey(qname)}
|
keys := []string{base.PendingKey(qname), base.ArchivedKey(qname)}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
|
argv := []interface{}{
|
||||||
args := []interface{}{now.Unix(), limit, maxArchiveSize}
|
now.Unix(),
|
||||||
res, err := archiveAllPendingCmd.Run(r.client, keys, args...).Result()
|
now.AddDate(0, 0, -archivedExpirationInDays).Unix(),
|
||||||
|
maxArchiveSize,
|
||||||
|
}
|
||||||
|
res, err := archiveAllPendingCmd.Run(r.client, keys, argv...).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
n, ok := res.(int64)
|
n, ok := res.(int64)
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, fmt.Errorf("could not cast %v to int64", res)
|
return 0, fmt.Errorf("command error: unexpected return value %v", res)
|
||||||
}
|
}
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> ZSET to move task from (e.g., retry queue)
|
// KEYS[1] -> ZSET to move task from (e.g., retry queue)
|
||||||
// KEYS[2] -> asynq:{<qname>}:archived
|
// KEYS[2] -> asynq:{<qname>}:archived
|
||||||
// ARGV[1] -> score of the task to archive
|
// ARGV[1] -> id of the task to archive
|
||||||
// ARGV[2] -> id of the task to archive
|
// ARGV[2] -> current timestamp
|
||||||
// ARGV[3] -> current timestamp
|
// ARGV[3] -> cutoff timestamp (e.g., 90 days ago)
|
||||||
// ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
|
// ARGV[4] -> max number of tasks in archived state (e.g., 100)
|
||||||
// ARGV[5] -> max number of tasks in archived state (e.g., 100)
|
|
||||||
var removeAndArchiveCmd = redis.NewScript(`
|
var removeAndArchiveCmd = redis.NewScript(`
|
||||||
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], ARGV[1], ARGV[1])
|
if redis.call("ZREM", KEYS[1], ARGV[1]) == 0 then
|
||||||
for _, msg in ipairs(msgs) do
|
return 0
|
||||||
local decoded = cjson.decode(msg)
|
|
||||||
if decoded["ID"] == ARGV[2] then
|
|
||||||
redis.call("ZREM", KEYS[1], msg)
|
|
||||||
redis.call("ZADD", KEYS[2], ARGV[3], msg)
|
|
||||||
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[4])
|
|
||||||
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[5])
|
|
||||||
return 1
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
return 0`)
|
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[1])
|
||||||
|
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[3])
|
||||||
|
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[4])
|
||||||
|
return 1
|
||||||
|
`)
|
||||||
|
|
||||||
func (r *RDB) removeAndArchive(src, dst, id string, score float64) (int64, error) {
|
func (r *RDB) removeAndArchive(src, dst, id string) (int64, error) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
|
limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
|
||||||
res, err := removeAndArchiveCmd.Run(r.client,
|
res, err := removeAndArchiveCmd.Run(r.client,
|
||||||
[]string{src, dst},
|
[]string{src, dst},
|
||||||
score, id, now.Unix(), limit, maxArchiveSize).Result()
|
id, now.Unix(), limit, maxArchiveSize).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@ -666,108 +696,106 @@ func (r *RDB) removeAndArchive(src, dst, id string, score float64) (int64, error
|
|||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> ZSET to move task from (e.g., retry queue)
|
// KEYS[1] -> ZSET to move task from (e.g., asynq:{<qname>}:retry)
|
||||||
// KEYS[2] -> asynq:{<qname>}:archived
|
// KEYS[2] -> asynq:{<qname>}:archived
|
||||||
// ARGV[1] -> current timestamp
|
// ARGV[1] -> current timestamp
|
||||||
// ARGV[2] -> cutoff timestamp (e.g., 90 days ago)
|
// ARGV[2] -> cutoff timestamp (e.g., 90 days ago)
|
||||||
// ARGV[3] -> max number of tasks in archive (e.g., 100)
|
// ARGV[3] -> max number of tasks in archive (e.g., 100)
|
||||||
var removeAndArchiveAllCmd = redis.NewScript(`
|
var removeAndArchiveAllCmd = redis.NewScript(`
|
||||||
local msgs = redis.call("ZRANGE", KEYS[1], 0, -1)
|
local ids = redis.call("ZRANGE", KEYS[1], 0, -1)
|
||||||
for _, msg in ipairs(msgs) do
|
for _, id in ipairs(ids) do
|
||||||
redis.call("ZADD", KEYS[2], ARGV[1], msg)
|
redis.call("ZADD", KEYS[2], ARGV[1], id)
|
||||||
redis.call("ZREM", KEYS[1], msg)
|
|
||||||
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[2])
|
redis.call("ZREMRANGEBYSCORE", KEYS[2], "-inf", ARGV[2])
|
||||||
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[3])
|
redis.call("ZREMRANGEBYRANK", KEYS[2], 0, -ARGV[3])
|
||||||
end
|
end
|
||||||
return table.getn(msgs)`)
|
redis.call("DEL", KEYS[1])
|
||||||
|
return table.getn(ids)`)
|
||||||
|
|
||||||
func (r *RDB) removeAndArchiveAll(src, dst string) (int64, error) {
|
func (r *RDB) removeAndArchiveAll(src, dst string) (int64, error) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
|
argv := []interface{}{
|
||||||
res, err := removeAndArchiveAllCmd.Run(r.client, []string{src, dst},
|
now.Unix(),
|
||||||
now.Unix(), limit, maxArchiveSize).Result()
|
now.AddDate(0, 0, -archivedExpirationInDays).Unix(),
|
||||||
|
maxArchiveSize,
|
||||||
|
}
|
||||||
|
res, err := removeAndArchiveAllCmd.Run(r.client,
|
||||||
|
[]string{src, dst}, argv...).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
n, ok := res.(int64)
|
n, ok := res.(int64)
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, fmt.Errorf("could not cast %v to int64", res)
|
return 0, fmt.Errorf("command error: unexpected return value %v", res)
|
||||||
}
|
}
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteArchivedTask deletes an archived task that matches the given id and score from the given queue.
|
// DeleteArchivedTask deletes an archived task that matches the given id and score from the given queue.
|
||||||
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
||||||
func (r *RDB) DeleteArchivedTask(qname string, id uuid.UUID, score int64) error {
|
func (r *RDB) DeleteArchivedTask(qname string, id uuid.UUID) error {
|
||||||
return r.deleteTask(base.ArchivedKey(qname), id.String(), float64(score))
|
return r.deleteTask(base.ArchivedKey(qname), qname, id.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteRetryTask deletes a retry task that matches the given id and score from the given queue.
|
// DeleteRetryTask deletes a retry task that matches the given id and score from the given queue.
|
||||||
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
||||||
func (r *RDB) DeleteRetryTask(qname string, id uuid.UUID, score int64) error {
|
func (r *RDB) DeleteRetryTask(qname string, id uuid.UUID) error {
|
||||||
return r.deleteTask(base.RetryKey(qname), id.String(), float64(score))
|
return r.deleteTask(base.RetryKey(qname), qname, id.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteScheduledTask deletes a scheduled task that matches the given id and score from the given queue.
|
// DeleteScheduledTask deletes a scheduled task that matches the given id and score from the given queue.
|
||||||
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
// If a task that matches the id and score does not exist, it returns ErrTaskNotFound.
|
||||||
func (r *RDB) DeleteScheduledTask(qname string, id uuid.UUID, score int64) error {
|
func (r *RDB) DeleteScheduledTask(qname string, id uuid.UUID) error {
|
||||||
return r.deleteTask(base.ScheduledKey(qname), id.String(), float64(score))
|
return r.deleteTask(base.ScheduledKey(qname), qname, id.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KEYS[1] -> asynq:{<qname>}:pending
|
||||||
|
// KEYS[2] -> asynq:{<qname>}:t:<task_id>
|
||||||
|
// ARGV[1] -> task ID
|
||||||
|
var deletePendingTaskCmd = redis.NewScript(`
|
||||||
|
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
|
||||||
|
return 0
|
||||||
|
end
|
||||||
|
return redis.call("DEL", KEYS[2])
|
||||||
|
`)
|
||||||
|
|
||||||
// DeletePendingTask deletes a pending task that matches the given id from the given queue.
|
// DeletePendingTask deletes a pending task that matches the given id from the given queue.
|
||||||
// If a task that matches the id does not exist, it returns ErrTaskNotFound.
|
// If there's no match, it returns ErrTaskNotFound.
|
||||||
func (r *RDB) DeletePendingTask(qname string, id uuid.UUID) error {
|
func (r *RDB) DeletePendingTask(qname string, id uuid.UUID) error {
|
||||||
qkey := base.QueueKey(qname)
|
keys := []string{base.PendingKey(qname), base.TaskKey(qname, id.String())}
|
||||||
data, err := r.client.LRange(qkey, 0, -1).Result()
|
res, err := deletePendingTaskCmd.Run(r.client, keys, id.String()).Result()
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, s := range data {
|
|
||||||
msg, err := base.DecodeMessage(s)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if msg.ID == id {
|
|
||||||
n, err := r.client.LRem(qkey, 1, s).Result()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if n == 0 {
|
|
||||||
return ErrTaskNotFound
|
|
||||||
}
|
|
||||||
if r.client.Get(msg.UniqueKey).Val() == msg.ID.String() {
|
|
||||||
if err := r.client.Del(msg.UniqueKey).Err(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ErrTaskNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
var deleteTaskCmd = redis.NewScript(`
|
|
||||||
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], ARGV[1], ARGV[1])
|
|
||||||
for _, msg in ipairs(msgs) do
|
|
||||||
local decoded = cjson.decode(msg)
|
|
||||||
if decoded["ID"] == ARGV[2] then
|
|
||||||
redis.call("ZREM", KEYS[1], msg)
|
|
||||||
if redis.call("GET", decoded["UniqueKey"]) == ARGV[2] then
|
|
||||||
redis.call("DEL", decoded["UniqueKey"])
|
|
||||||
end
|
|
||||||
return 1
|
|
||||||
end
|
|
||||||
end
|
|
||||||
return 0`)
|
|
||||||
|
|
||||||
func (r *RDB) deleteTask(key, id string, score float64) error {
|
|
||||||
res, err := deleteTaskCmd.Run(r.client, []string{key}, score, id).Result()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
n, ok := res.(int64)
|
n, ok := res.(int64)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("could not cast %v to int64", res)
|
return fmt.Errorf("command error: unexpected return value %v", res)
|
||||||
|
}
|
||||||
|
if n == 0 {
|
||||||
|
return ErrTaskNotFound
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// KEYS[1] -> ZSET key to remove the task from (e.g. asynq:{<qname>}:retry)
|
||||||
|
// KEYS[2] -> asynq:{<qname>}:t:<task_id>
|
||||||
|
// ARGV[1] -> task ID
|
||||||
|
var deleteTaskCmd = redis.NewScript(`
|
||||||
|
if redis.call("ZREM", KEYS[1], ARGV[1]) == 0 then
|
||||||
|
return 0
|
||||||
|
end
|
||||||
|
return redis.call("DEL", KEYS[2])
|
||||||
|
`)
|
||||||
|
|
||||||
|
func (r *RDB) deleteTask(key, qname, id string) error {
|
||||||
|
keys := []string{key, base.TaskKey(qname, id)}
|
||||||
|
argv := []interface{}{id}
|
||||||
|
res, err := deleteTaskCmd.Run(r.client, keys, argv...).Result()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
n, ok := res.(int64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("command error: unexpected return value %v", res)
|
||||||
}
|
}
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
return ErrTaskNotFound
|
return ErrTaskNotFound
|
||||||
@ -776,37 +804,36 @@ func (r *RDB) deleteTask(key, id string, score float64) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
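The new deleteTaskCmd removes the ID from the sorted set and deletes the per-task hash in one atomic script; if the ID is absent it returns 0 and leaves the hash alone. A hypothetical caller (queue name and id are placeholders):

func dropScheduled(r *RDB, id uuid.UUID) error {
	err := r.DeleteScheduledTask("low", id)
	if err == ErrTaskNotFound {
		// The id was not in asynq:{low}:scheduled, so no keys were touched.
	}
	return err
}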
// KEYS[1] -> queue to delete
|
// KEYS[1] -> queue to delete
|
||||||
|
// ARGV[1] -> task key prefix
|
||||||
var deleteAllCmd = redis.NewScript(`
|
var deleteAllCmd = redis.NewScript(`
|
||||||
local msgs = redis.call("ZRANGE", KEYS[1], 0, -1)
|
local ids = redis.call("ZRANGE", KEYS[1], 0, -1)
|
||||||
for _, msg in ipairs(msgs) do
|
for _, id in ipairs(ids) do
|
||||||
local decoded = cjson.decode(msg)
|
local key = ARGV[1] .. id
|
||||||
if redis.call("GET", decoded["UniqueKey"]) == decoded["ID"] then
|
redis.call("DEL", key)
|
||||||
redis.call("DEL", decoded["UniqueKey"])
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
redis.call("DEL", KEYS[1])
|
redis.call("DEL", KEYS[1])
|
||||||
return table.getn(msgs)`)
|
return table.getn(ids)`)
|
||||||
|
|
||||||
// DeleteAllArchivedTasks deletes all archived tasks from the given queue
|
// DeleteAllArchivedTasks deletes all archived tasks from the given queue
|
||||||
// and returns the number of tasks deleted.
|
// and returns the number of tasks deleted.
|
||||||
func (r *RDB) DeleteAllArchivedTasks(qname string) (int64, error) {
|
func (r *RDB) DeleteAllArchivedTasks(qname string) (int64, error) {
|
||||||
return r.deleteAll(base.ArchivedKey(qname))
|
return r.deleteAll(base.ArchivedKey(qname), qname)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteAllRetryTasks deletes all retry tasks from the given queue
|
// DeleteAllRetryTasks deletes all retry tasks from the given queue
|
||||||
// and returns the number of tasks deleted.
|
// and returns the number of tasks deleted.
|
||||||
func (r *RDB) DeleteAllRetryTasks(qname string) (int64, error) {
|
func (r *RDB) DeleteAllRetryTasks(qname string) (int64, error) {
|
||||||
return r.deleteAll(base.RetryKey(qname))
|
return r.deleteAll(base.RetryKey(qname), qname)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteAllScheduledTasks deletes all scheduled tasks from the given queue
|
// DeleteAllScheduledTasks deletes all scheduled tasks from the given queue
|
||||||
// and returns the number of tasks deleted.
|
// and returns the number of tasks deleted.
|
||||||
func (r *RDB) DeleteAllScheduledTasks(qname string) (int64, error) {
|
func (r *RDB) DeleteAllScheduledTasks(qname string) (int64, error) {
|
||||||
return r.deleteAll(base.ScheduledKey(qname))
|
return r.deleteAll(base.ScheduledKey(qname), qname)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RDB) deleteAll(key string) (int64, error) {
|
func (r *RDB) deleteAll(key, qname string) (int64, error) {
|
||||||
res, err := deleteAllCmd.Run(r.client, []string{key}).Result()
|
res, err := deleteAllCmd.Run(r.client, []string{key}, base.TaskKeyPrefix(qname)).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@ -817,28 +844,28 @@ func (r *RDB) deleteAll(key string) (int64, error) {
|
|||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
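The bulk-delete scripts now receive the task key prefix as ARGV so each per-task hash can be dropped alongside the ID list or set. The prefix and full key are assumed to be built as follows, inferred from the commit description rather than verified here:

	base.TaskKeyPrefix("default") // "asynq:{default}:t:"
	base.TaskKey("default", id)   // "asynq:{default}:t:" + id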
// KEYS[1] -> asynq:{<qname>}
|
// KEYS[1] -> asynq:{<qname>}:pending
|
||||||
|
// ARGV[1] -> task key prefix
|
||||||
var deleteAllPendingCmd = redis.NewScript(`
|
var deleteAllPendingCmd = redis.NewScript(`
|
||||||
local msgs = redis.call("LRANGE", KEYS[1], 0, -1)
|
local ids = redis.call("LRANGE", KEYS[1], 0, -1)
|
||||||
for _, msg in ipairs(msgs) do
|
for _, id in ipairs(ids) do
|
||||||
local decoded = cjson.decode(msg)
|
local key = ARGV[1] .. id
|
||||||
if redis.call("GET", decoded["UniqueKey"]) == decoded["ID"] then
|
redis.call("DEL", key)
|
||||||
redis.call("DEL", decoded["UniqueKey"])
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
redis.call("DEL", KEYS[1])
|
redis.call("DEL", KEYS[1])
|
||||||
return table.getn(msgs)`)
|
return table.getn(ids)`)
|
||||||
|
|
||||||
// DeleteAllPendingTasks deletes all pending tasks from the given queue
|
// DeleteAllPendingTasks deletes all pending tasks from the given queue
|
||||||
// and returns the number of tasks deleted.
|
// and returns the number of tasks deleted.
|
||||||
func (r *RDB) DeleteAllPendingTasks(qname string) (int64, error) {
|
func (r *RDB) DeleteAllPendingTasks(qname string) (int64, error) {
|
||||||
res, err := deleteAllPendingCmd.Run(r.client, []string{base.QueueKey(qname)}).Result()
|
res, err := deleteAllPendingCmd.Run(r.client,
|
||||||
|
[]string{base.PendingKey(qname)}, base.TaskKeyPrefix(qname)).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
n, ok := res.(int64)
|
n, ok := res.(int64)
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, fmt.Errorf("could not cast %v to int64", res)
|
return 0, fmt.Errorf("command error: unexpected return value %v", res)
|
||||||
}
|
}
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
@ -868,11 +895,27 @@ func (e *ErrQueueNotEmpty) Error() string {
|
|||||||
// KEYS[4] -> asynq:{<qname>}:retry
|
// KEYS[4] -> asynq:{<qname>}:retry
|
||||||
// KEYS[5] -> asynq:{<qname>}:archived
|
// KEYS[5] -> asynq:{<qname>}:archived
|
||||||
// KEYS[6] -> asynq:{<qname>}:deadlines
|
// KEYS[6] -> asynq:{<qname>}:deadlines
|
||||||
|
// ARGV[1] -> task key prefix
|
||||||
var removeQueueForceCmd = redis.NewScript(`
|
var removeQueueForceCmd = redis.NewScript(`
|
||||||
local active = redis.call("LLEN", KEYS[2])
|
local active = redis.call("LLEN", KEYS[2])
|
||||||
if active > 0 then
|
if active > 0 then
|
||||||
return redis.error_reply("Queue has tasks active")
|
return redis.error_reply("Queue has tasks active")
|
||||||
end
|
end
|
||||||
|
for _, id in ipairs(redis.call("LRANGE", KEYS[1], 0, -1)) do
|
||||||
|
redis.call("DEL", ARGV[1] .. id)
|
||||||
|
end
|
||||||
|
for _, id in ipairs(redis.call("LRANGE", KEYS[2], 0, -1)) do
|
||||||
|
redis.call("DEL", ARGV[1] .. id)
|
||||||
|
end
|
||||||
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[3], 0, -1)) do
|
||||||
|
redis.call("DEL", ARGV[1] .. id)
|
||||||
|
end
|
||||||
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[4], 0, -1)) do
|
||||||
|
redis.call("DEL", ARGV[1] .. id)
|
||||||
|
end
|
||||||
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[5], 0, -1)) do
|
||||||
|
redis.call("DEL", ARGV[1] .. id)
|
||||||
|
end
|
||||||
redis.call("DEL", KEYS[1])
|
redis.call("DEL", KEYS[1])
|
||||||
redis.call("DEL", KEYS[2])
|
redis.call("DEL", KEYS[2])
|
||||||
redis.call("DEL", KEYS[3])
|
redis.call("DEL", KEYS[3])
|
||||||
@ -882,22 +925,36 @@ redis.call("DEL", KEYS[6])
|
|||||||
return redis.status_reply("OK")`)
|
return redis.status_reply("OK")`)
|
||||||
|
|
||||||
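Forced removal now walks every ID held in the queue's lists and sorted sets and deletes the matching task hashes before dropping the queue keys themselves; a queue with active tasks still refuses removal. Hypothetical usage:

func purgeQueue(r *RDB) error {
	return r.RemoveQueue("lowpriority", true) // force=true
}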
// Checks whether queue is empty before removing.
|
// Checks whether queue is empty before removing.
|
||||||
// KEYS[1] -> asynq:{<qname>}
|
// KEYS[1] -> asynq:{<qname>}:pending
|
||||||
// KEYS[2] -> asynq:{<qname>}:active
|
// KEYS[2] -> asynq:{<qname>}:active
|
||||||
// KEYS[3] -> asynq:{<qname>}:scheduled
|
// KEYS[3] -> asynq:{<qname>}:scheduled
|
||||||
// KEYS[4] -> asynq:{<qname>}:retry
|
// KEYS[4] -> asynq:{<qname>}:retry
|
||||||
// KEYS[5] -> asynq:{<qname>}:archived
|
// KEYS[5] -> asynq:{<qname>}:archived
|
||||||
// KEYS[6] -> asynq:{<qname>}:deadlines
|
// KEYS[6] -> asynq:{<qname>}:deadlines
|
||||||
|
// ARGV[1] -> task key prefix
|
||||||
var removeQueueCmd = redis.NewScript(`
|
var removeQueueCmd = redis.NewScript(`
|
||||||
local pending = redis.call("LLEN", KEYS[1])
|
local ids = {}
|
||||||
local active = redis.call("LLEN", KEYS[2])
|
for _, id in ipairs(redis.call("LRANGE", KEYS[1], 0, -1)) do
|
||||||
local scheduled = redis.call("SCARD", KEYS[3])
|
table.insert(ids, id)
|
||||||
local retry = redis.call("SCARD", KEYS[4])
|
end
|
||||||
local archived = redis.call("SCARD", KEYS[5])
|
for _, id in ipairs(redis.call("LRANGE", KEYS[2], 0, -1)) do
|
||||||
local total = pending + active + scheduled + retry + archived
|
table.insert(ids, id)
|
||||||
if total > 0 then
|
end
|
||||||
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[3], 0, -1)) do
|
||||||
|
table.insert(ids, id)
|
||||||
|
end
|
||||||
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[4], 0, -1)) do
|
||||||
|
table.insert(ids, id)
|
||||||
|
end
|
||||||
|
for _, id in ipairs(redis.call("ZRANGE", KEYS[5], 0, -1)) do
|
||||||
|
table.insert(ids, id)
|
||||||
|
end
|
||||||
|
if table.getn(ids) > 0 then
|
||||||
return redis.error_reply("QUEUE NOT EMPTY")
|
return redis.error_reply("QUEUE NOT EMPTY")
|
||||||
end
|
end
|
||||||
|
for _, id in ipairs(ids) do
|
||||||
|
redis.call("DEL", ARGV[1] .. id)
|
||||||
|
end
|
||||||
redis.call("DEL", KEYS[1])
|
redis.call("DEL", KEYS[1])
|
||||||
redis.call("DEL", KEYS[2])
|
redis.call("DEL", KEYS[2])
|
||||||
redis.call("DEL", KEYS[3])
|
redis.call("DEL", KEYS[3])
|
||||||
@ -927,14 +984,14 @@ func (r *RDB) RemoveQueue(qname string, force bool) error {
|
|||||||
script = removeQueueCmd
|
script = removeQueueCmd
|
||||||
}
|
}
|
||||||
keys := []string{
|
keys := []string{
|
||||||
base.QueueKey(qname),
|
base.PendingKey(qname),
|
||||||
base.ActiveKey(qname),
|
base.ActiveKey(qname),
|
||||||
base.ScheduledKey(qname),
|
base.ScheduledKey(qname),
|
||||||
base.RetryKey(qname),
|
base.RetryKey(qname),
|
||||||
base.ArchivedKey(qname),
|
base.ArchivedKey(qname),
|
||||||
base.DeadlinesKey(qname),
|
base.DeadlinesKey(qname),
|
||||||
}
|
}
|
||||||
if err := script.Run(r.client, keys).Err(); err != nil {
|
if err := script.Run(r.client, keys, base.TaskKeyPrefix(qname)).Err(); err != nil {
|
||||||
if err.Error() == "QUEUE NOT EMPTY" {
|
if err.Error() == "QUEUE NOT EMPTY" {
|
||||||
return &ErrQueueNotEmpty{qname}
|
return &ErrQueueNotEmpty{qname}
|
||||||
}
|
}
|
||||||
@ -967,46 +1024,47 @@ func (r *RDB) ListServers() ([]*base.ServerInfo, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
continue // skip bad data
|
continue // skip bad data
|
||||||
}
|
}
|
||||||
var info base.ServerInfo
|
info, err := base.DecodeServerInfo([]byte(data))
|
||||||
if err := json.Unmarshal([]byte(data), &info); err != nil {
|
if err != nil {
|
||||||
continue // skip bad data
|
continue // skip bad data
|
||||||
}
|
}
|
||||||
servers = append(servers, &info)
|
servers = append(servers, info)
|
||||||
}
|
}
|
||||||
return servers, nil
|
return servers, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note: Script also removes stale keys.
|
// Note: Script also removes stale keys.
|
||||||
var listWorkerKeysCmd = redis.NewScript(`
|
var listWorkersCmd = redis.NewScript(`
|
||||||
local now = tonumber(ARGV[1])
|
local now = tonumber(ARGV[1])
|
||||||
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
|
local keys = redis.call("ZRANGEBYSCORE", KEYS[1], now, "+inf")
|
||||||
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
|
redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", now-1)
|
||||||
return keys`)
|
local res = {}
|
||||||
|
for _, key in ipairs(keys) do
|
||||||
|
local vals = redis.call("HVALS", key)
|
||||||
|
for _, v in ipairs(vals) do
|
||||||
|
table.insert(res, v)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
return res`)
|
||||||
|
|
||||||
// ListWorkers returns the list of worker stats.
|
// ListWorkers returns the list of worker stats.
|
||||||
func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
func (r *RDB) ListWorkers() ([]*base.WorkerInfo, error) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
res, err := listWorkerKeysCmd.Run(r.client, []string{base.AllWorkers}, now.Unix()).Result()
|
res, err := listWorkersCmd.Run(r.client, []string{base.AllWorkers}, now.Unix()).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
keys, err := cast.ToStringSliceE(res)
|
data, err := cast.ToStringSliceE(res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var workers []*base.WorkerInfo
|
var workers []*base.WorkerInfo
|
||||||
for _, key := range keys {
|
for _, s := range data {
|
||||||
data, err := r.client.HVals(key).Result()
|
w, err := base.DecodeWorkerInfo([]byte(s))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue // skip bad data
|
continue // skip bad data
|
||||||
}
|
}
|
||||||
for _, s := range data {
|
workers = append(workers, w)
|
||||||
var w base.WorkerInfo
|
|
||||||
if err := json.Unmarshal([]byte(s), &w); err != nil {
|
|
||||||
continue // skip bad data
|
|
||||||
}
|
|
||||||
workers = append(workers, &w)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return workers, nil
|
return workers, nil
|
||||||
}
|
}
|
||||||
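Worker stats are now gathered inside the script (HVALS per worker key) and decoded from protobuf with base.DecodeWorkerInfo, replacing a round trip and a json.Unmarshal per key. Callers are unchanged; illustrative only:

func workerCount(r *RDB) (int, error) {
	workers, err := r.ListWorkers()
	if err != nil {
		return 0, err
	}
	return len(workers), nil
}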
@ -1036,11 +1094,11 @@ func (r *RDB) ListSchedulerEntries() ([]*base.SchedulerEntry, error) {
|
|||||||
continue // skip bad data
|
continue // skip bad data
|
||||||
}
|
}
|
||||||
for _, s := range data {
|
for _, s := range data {
|
||||||
var e base.SchedulerEntry
|
e, err := base.DecodeSchedulerEntry([]byte(s))
|
||||||
if err := json.Unmarshal([]byte(s), &e); err != nil {
|
if err != nil {
|
||||||
continue // skip bad data
|
continue // skip bad data
|
||||||
}
|
}
|
||||||
entries = append(entries, &e)
|
entries = append(entries, e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return entries, nil
|
return entries, nil
|
||||||
@ -1059,11 +1117,11 @@ func (r *RDB) ListSchedulerEnqueueEvents(entryID string, pgn Pagination) ([]*bas
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var e base.SchedulerEnqueueEvent
|
e, err := base.DecodeSchedulerEnqueueEvent([]byte(data))
|
||||||
if err := json.Unmarshal([]byte(data), &e); err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
events = append(events, &e)
|
events = append(events, e)
|
||||||
}
|
}
|
||||||
return events, nil
|
return events, nil
|
||||||
}
|
}
|
||||||
@ -1096,7 +1154,7 @@ func (r *RDB) Unpause(qname string) error {
|
|||||||
|
|
||||||
// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
|
// ClusterKeySlot returns an integer identifying the hash slot the given queue hashes to.
|
||||||
func (r *RDB) ClusterKeySlot(qname string) (int64, error) {
|
func (r *RDB) ClusterKeySlot(qname string) (int64, error) {
|
||||||
key := base.QueueKey(qname)
|
key := base.PendingKey(qname)
|
||||||
return r.client.ClusterKeySlot(key).Result()
|
return r.client.ClusterKeySlot(key).Result()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -386,7 +386,7 @@ func TestListPendingPagination(t *testing.T) {
|
|||||||
|
|
||||||
msgs = []*base.TaskMessage(nil) // empty list
|
msgs = []*base.TaskMessage(nil) // empty list
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
msg := h.NewTaskMessage(fmt.Sprintf("custom %d", i), nil)
|
msg := h.NewTaskMessageWithQueue(fmt.Sprintf("custom %d", i), nil, "custom")
|
||||||
msgs = append(msgs, msg)
|
msgs = append(msgs, msg)
|
||||||
}
|
}
|
||||||
// create 100 tasks in custom queue
|
// create 100 tasks in custom queue
|
||||||
@ -841,7 +841,7 @@ func TestListRetryPagination(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestListDead(t *testing.T) {
|
func TestListArchived(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
m1 := &base.TaskMessage{
|
m1 := &base.TaskMessage{
|
||||||
@ -932,7 +932,7 @@ func TestListDead(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestListDeadPagination(t *testing.T) {
|
func TestListArchivedPagination(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
var entries []base.Z
|
var entries []base.Z
|
||||||
@ -996,7 +996,7 @@ var (
|
|||||||
zScoreCmpOpt = h.EquateInt64Approx(2) // allow for 2 seconds margin in Z.Score
|
zScoreCmpOpt = h.EquateInt64Approx(2) // allow for 2 seconds margin in Z.Score
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestRunDeadTask(t *testing.T) {
|
func TestRunArchivedTask(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
t1 := h.NewTaskMessage("send_email", nil)
|
t1 := h.NewTaskMessage("send_email", nil)
|
||||||
@ -1008,9 +1008,8 @@ func TestRunDeadTask(t *testing.T) {
|
|||||||
tests := []struct {
|
tests := []struct {
|
||||||
archived map[string][]base.Z
|
archived map[string][]base.Z
|
||||||
qname string
|
qname string
|
||||||
score int64
|
|
||||||
id uuid.UUID
|
id uuid.UUID
|
||||||
want error // expected return value from calling RunDeadTask
|
want error // expected return value from calling RunArchivedTask
|
||||||
wantArchived map[string][]*base.TaskMessage
|
wantArchived map[string][]*base.TaskMessage
|
||||||
wantPending map[string][]*base.TaskMessage
|
wantPending map[string][]*base.TaskMessage
|
||||||
}{
|
}{
|
||||||
@ -1022,7 +1021,6 @@ func TestRunDeadTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
score: s2,
|
|
||||||
id: t2.ID,
|
id: t2.ID,
|
||||||
want: nil,
|
want: nil,
|
||||||
wantArchived: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
@ -1040,8 +1038,7 @@ func TestRunDeadTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
score: 123,
|
id: uuid.New(),
|
||||||
id: t2.ID,
|
|
||||||
want: ErrTaskNotFound,
|
want: ErrTaskNotFound,
|
||||||
wantArchived: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {t1, t2},
|
"default": {t1, t2},
|
||||||
@ -1061,7 +1058,6 @@ func TestRunDeadTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "critical",
|
qname: "critical",
|
||||||
score: s1,
|
|
||||||
id: t3.ID,
|
id: t3.ID,
|
||||||
want: nil,
|
want: nil,
|
||||||
wantArchived: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
@ -1079,16 +1075,16 @@ func TestRunDeadTask(t *testing.T) {
|
|||||||
h.FlushDB(t, r.client) // clean up db before each test case
|
h.FlushDB(t, r.client) // clean up db before each test case
|
||||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||||
|
|
||||||
got := r.RunArchivedTask(tc.qname, tc.id, tc.score)
|
got := r.RunArchivedTask(tc.qname, tc.id)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("r.RunDeadTask(%q, %s, %d) = %v, want %v", tc.qname, tc.id, tc.score, got, tc.want)
|
t.Errorf("r.RunDeadTask(%q, %s) = %v, want %v", tc.qname, tc.id, got, tc.want)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for qname, want := range tc.wantPending {
|
for qname, want := range tc.wantPending {
|
||||||
gotPending := h.GetPendingMessages(t, r.client, qname)
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff)
|
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.PendingKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1113,7 +1109,6 @@ func TestRunRetryTask(t *testing.T) {
|
|||||||
tests := []struct {
|
tests := []struct {
|
||||||
retry map[string][]base.Z
|
retry map[string][]base.Z
|
||||||
qname string
|
qname string
|
||||||
score int64
|
|
||||||
id uuid.UUID
|
id uuid.UUID
|
||||||
want error // expected return value from calling RunRetryTask
|
want error // expected return value from calling RunRetryTask
|
||||||
wantRetry map[string][]*base.TaskMessage
|
wantRetry map[string][]*base.TaskMessage
|
||||||
@ -1127,7 +1122,6 @@ func TestRunRetryTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
score: s2,
|
|
||||||
id: t2.ID,
|
id: t2.ID,
|
||||||
want: nil,
|
want: nil,
|
||||||
wantRetry: map[string][]*base.TaskMessage{
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
@ -1145,8 +1139,7 @@ func TestRunRetryTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
score: 123,
|
id: uuid.New(),
|
||||||
id: t2.ID,
|
|
||||||
want: ErrTaskNotFound,
|
want: ErrTaskNotFound,
|
||||||
wantRetry: map[string][]*base.TaskMessage{
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
"default": {t1, t2},
|
"default": {t1, t2},
|
||||||
@ -1166,7 +1159,6 @@ func TestRunRetryTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "low",
|
qname: "low",
|
||||||
score: s2,
|
|
||||||
id: t3.ID,
|
id: t3.ID,
|
||||||
want: nil,
|
want: nil,
|
||||||
wantRetry: map[string][]*base.TaskMessage{
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
@ -1184,16 +1176,16 @@ func TestRunRetryTask(t *testing.T) {
|
|||||||
h.FlushDB(t, r.client) // clean up db before each test case
|
h.FlushDB(t, r.client) // clean up db before each test case
|
||||||
h.SeedAllRetryQueues(t, r.client, tc.retry) // initialize retry queue
|
h.SeedAllRetryQueues(t, r.client, tc.retry) // initialize retry queue
|
||||||
|
|
||||||
got := r.RunRetryTask(tc.qname, tc.id, tc.score)
|
got := r.RunRetryTask(tc.qname, tc.id)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("r.RunRetryTask(%q, %s, %d) = %v, want %v", tc.qname, tc.id, tc.score, got, tc.want)
|
t.Errorf("r.RunRetryTask(%q, %s) = %v, want %v", tc.qname, tc.id, got, tc.want)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for qname, want := range tc.wantPending {
|
for qname, want := range tc.wantPending {
|
||||||
gotPending := h.GetPendingMessages(t, r.client, qname)
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff)
|
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.PendingKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1218,7 +1210,6 @@ func TestRunScheduledTask(t *testing.T) {
|
|||||||
tests := []struct {
|
tests := []struct {
|
||||||
scheduled map[string][]base.Z
|
scheduled map[string][]base.Z
|
||||||
qname string
|
qname string
|
||||||
score int64
|
|
||||||
id uuid.UUID
|
id uuid.UUID
|
||||||
want error // expected return value from calling RunScheduledTask
|
want error // expected return value from calling RunScheduledTask
|
||||||
wantScheduled map[string][]*base.TaskMessage
|
wantScheduled map[string][]*base.TaskMessage
|
||||||
@ -1232,7 +1223,6 @@ func TestRunScheduledTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
score: s2,
|
|
||||||
id: t2.ID,
|
id: t2.ID,
|
||||||
want: nil,
|
want: nil,
|
||||||
wantScheduled: map[string][]*base.TaskMessage{
|
wantScheduled: map[string][]*base.TaskMessage{
|
||||||
@ -1250,8 +1240,7 @@ func TestRunScheduledTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
score: 123,
|
id: uuid.New(),
|
||||||
id: t2.ID,
|
|
||||||
want: ErrTaskNotFound,
|
want: ErrTaskNotFound,
|
||||||
wantScheduled: map[string][]*base.TaskMessage{
|
wantScheduled: map[string][]*base.TaskMessage{
|
||||||
"default": {t1, t2},
|
"default": {t1, t2},
|
||||||
@ -1271,7 +1260,6 @@ func TestRunScheduledTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "notifications",
|
qname: "notifications",
|
||||||
score: s1,
|
|
||||||
id: t3.ID,
|
id: t3.ID,
|
||||||
want: nil,
|
want: nil,
|
||||||
wantScheduled: map[string][]*base.TaskMessage{
|
wantScheduled: map[string][]*base.TaskMessage{
|
||||||
@ -1289,16 +1277,16 @@ func TestRunScheduledTask(t *testing.T) {
|
|||||||
h.FlushDB(t, r.client) // clean up db before each test case
|
h.FlushDB(t, r.client) // clean up db before each test case
|
||||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||||
|
|
||||||
got := r.RunScheduledTask(tc.qname, tc.id, tc.score)
|
got := r.RunScheduledTask(tc.qname, tc.id)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("r.RunRetryTask(%q, %s, %d) = %v, want %v", tc.qname, tc.id, tc.score, got, tc.want)
|
t.Errorf("r.RunRetryTask(%q, %s) = %v, want %v", tc.qname, tc.id, got, tc.want)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for qname, want := range tc.wantPending {
|
for qname, want := range tc.wantPending {
|
||||||
gotPending := h.GetPendingMessages(t, r.client, qname)
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff)
|
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.PendingKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1405,7 +1393,7 @@ func TestRunAllScheduledTasks(t *testing.T) {
|
|||||||
for qname, want := range tc.wantPending {
|
for qname, want := range tc.wantPending {
|
||||||
gotPending := h.GetPendingMessages(t, r.client, qname)
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("%s; mismatch found in %q; (-want, +got)\n%s", tc.desc, base.QueueKey(qname), diff)
|
t.Errorf("%s; mismatch found in %q; (-want, +got)\n%s", tc.desc, base.PendingKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for qname, want := range tc.wantScheduled {
|
for qname, want := range tc.wantScheduled {
|
||||||
@ -1511,7 +1499,7 @@ func TestRunAllRetryTasks(t *testing.T) {
|
|||||||
for qname, want := range tc.wantPending {
|
for qname, want := range tc.wantPending {
|
||||||
gotPending := h.GetPendingMessages(t, r.client, qname)
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("%s; mismatch found in %q; (-want, +got)\n%s", tc.desc, base.QueueKey(qname), diff)
|
t.Errorf("%s; mismatch found in %q; (-want, +got)\n%s", tc.desc, base.PendingKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for qname, want := range tc.wantRetry {
|
for qname, want := range tc.wantRetry {
|
||||||
@ -1523,7 +1511,7 @@ func TestRunAllRetryTasks(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRunAllDeadTasks(t *testing.T) {
|
func TestRunAllArchivedTasks(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
t1 := h.NewTaskMessage("send_email", nil)
|
t1 := h.NewTaskMessage("send_email", nil)
|
||||||
@ -1617,7 +1605,7 @@ func TestRunAllDeadTasks(t *testing.T) {
|
|||||||
for qname, want := range tc.wantPending {
|
for qname, want := range tc.wantPending {
|
||||||
gotPending := h.GetPendingMessages(t, r.client, qname)
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("%s; mismatch found in %q; (-want, +got)\n%s", tc.desc, base.QueueKey(qname), diff)
|
t.Errorf("%s; mismatch found in %q; (-want, +got)\n%s", tc.desc, base.PendingKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for qname, want := range tc.wantArchived {
|
for qname, want := range tc.wantArchived {
|
||||||
@ -1629,7 +1617,7 @@ func TestRunAllDeadTasks(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKillRetryTask(t *testing.T) {
|
func TestArchiveRetryTask(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
m1 := h.NewTaskMessage("task1", nil)
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
@ -1646,7 +1634,6 @@ func TestKillRetryTask(t *testing.T) {
|
|||||||
archived map[string][]base.Z
|
archived map[string][]base.Z
|
||||||
qname string
|
qname string
|
||||||
id uuid.UUID
|
id uuid.UUID
|
||||||
score int64
|
|
||||||
want error
|
want error
|
||||||
wantRetry map[string][]base.Z
|
wantRetry map[string][]base.Z
|
||||||
wantArchived map[string][]base.Z
|
wantArchived map[string][]base.Z
|
||||||
@ -1663,7 +1650,6 @@ func TestKillRetryTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m1.ID,
|
id: m1.ID,
|
||||||
score: t1.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantRetry: map[string][]base.Z{
|
wantRetry: map[string][]base.Z{
|
||||||
"default": {{Message: m2, Score: t2.Unix()}},
|
"default": {{Message: m2, Score: t2.Unix()}},
|
||||||
@ -1680,8 +1666,7 @@ func TestKillRetryTask(t *testing.T) {
|
|||||||
"default": {{Message: m2, Score: t2.Unix()}},
|
"default": {{Message: m2, Score: t2.Unix()}},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m2.ID,
|
id: uuid.New(),
|
||||||
score: t2.Unix(),
|
|
||||||
want: ErrTaskNotFound,
|
want: ErrTaskNotFound,
|
||||||
wantRetry: map[string][]base.Z{
|
wantRetry: map[string][]base.Z{
|
||||||
"default": {{Message: m1, Score: t1.Unix()}},
|
"default": {{Message: m1, Score: t1.Unix()}},
|
||||||
@ -1707,7 +1692,6 @@ func TestKillRetryTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "custom",
|
qname: "custom",
|
||||||
id: m3.ID,
|
id: m3.ID,
|
||||||
score: t3.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantRetry: map[string][]base.Z{
|
wantRetry: map[string][]base.Z{
|
||||||
"default": {
|
"default": {
|
||||||
@ -1730,10 +1714,10 @@ func TestKillRetryTask(t *testing.T) {
|
|||||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||||
|
|
||||||
got := r.ArchiveRetryTask(tc.qname, tc.id, tc.score)
|
got := r.ArchiveRetryTask(tc.qname, tc.id)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("(*RDB).KillRetryTask(%q, %v, %v) = %v, want %v",
|
t.Errorf("(*RDB).ArchiveRetryTask(%q, %v) = %v, want %v",
|
||||||
tc.qname, tc.id, tc.score, got, tc.want)
|
tc.qname, tc.id, got, tc.want)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1755,7 +1739,7 @@ func TestKillRetryTask(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKillScheduledTask(t *testing.T) {
|
func TestArchiveScheduledTask(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
m1 := h.NewTaskMessage("task1", nil)
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
@ -1772,7 +1756,6 @@ func TestKillScheduledTask(t *testing.T) {
|
|||||||
archived map[string][]base.Z
|
archived map[string][]base.Z
|
||||||
qname string
|
qname string
|
||||||
id uuid.UUID
|
id uuid.UUID
|
||||||
score int64
|
|
||||||
want error
|
want error
|
||||||
wantScheduled map[string][]base.Z
|
wantScheduled map[string][]base.Z
|
||||||
wantArchived map[string][]base.Z
|
wantArchived map[string][]base.Z
|
||||||
@ -1789,7 +1772,6 @@ func TestKillScheduledTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m1.ID,
|
id: m1.ID,
|
||||||
score: t1.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantScheduled: map[string][]base.Z{
|
wantScheduled: map[string][]base.Z{
|
||||||
"default": {{Message: m2, Score: t2.Unix()}},
|
"default": {{Message: m2, Score: t2.Unix()}},
|
||||||
@ -1807,7 +1789,6 @@ func TestKillScheduledTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m2.ID,
|
id: m2.ID,
|
||||||
score: t2.Unix(),
|
|
||||||
want: ErrTaskNotFound,
|
want: ErrTaskNotFound,
|
||||||
wantScheduled: map[string][]base.Z{
|
wantScheduled: map[string][]base.Z{
|
||||||
"default": {{Message: m1, Score: t1.Unix()}},
|
"default": {{Message: m1, Score: t1.Unix()}},
|
||||||
@ -1833,7 +1814,6 @@ func TestKillScheduledTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "custom",
|
qname: "custom",
|
||||||
id: m3.ID,
|
id: m3.ID,
|
||||||
score: t3.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantScheduled: map[string][]base.Z{
|
wantScheduled: map[string][]base.Z{
|
||||||
"default": {
|
"default": {
|
||||||
@ -1856,10 +1836,10 @@ func TestKillScheduledTask(t *testing.T) {
|
|||||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||||
|
|
||||||
got := r.ArchiveScheduledTask(tc.qname, tc.id, tc.score)
|
got := r.ArchiveScheduledTask(tc.qname, tc.id)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("(*RDB).KillScheduledTask(%q, %v, %v) = %v, want %v",
|
t.Errorf("(*RDB).ArchiveScheduledTask(%q, %v) = %v, want %v",
|
||||||
tc.qname, tc.id, tc.score, got, tc.want)
|
tc.qname, tc.id, got, tc.want)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1881,7 +1861,244 @@ func TestKillScheduledTask(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKillAllRetryTasks(t *testing.T) {
|
func TestArchivePendingTask(t *testing.T) {
|
||||||
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
|
m2 := h.NewTaskMessage("task2", nil)
|
||||||
|
m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
|
||||||
|
m4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
|
||||||
|
|
||||||
|
oneHourAgo := time.Now().Add(-1 * time.Hour)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
pending map[string][]*base.TaskMessage
|
||||||
|
archived map[string][]base.Z
|
||||||
|
qname string
|
||||||
|
id uuid.UUID
|
||||||
|
want error
|
||||||
|
wantPending map[string][]*base.TaskMessage
|
||||||
|
wantArchived map[string][]base.Z
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
qname: "default",
|
||||||
|
id: m1.ID,
|
||||||
|
want: nil,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m2},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]base.Z{
|
||||||
|
"default": {{Message: m1, Score: time.Now().Unix()}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {{Message: m2, Score: oneHourAgo.Unix()}},
|
||||||
|
},
|
||||||
|
qname: "default",
|
||||||
|
id: m2.ID,
|
||||||
|
want: ErrTaskNotFound,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]base.Z{
|
||||||
|
"default": {{Message: m2, Score: oneHourAgo.Unix()}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
"custom": {m3, m4},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"custom": {},
|
||||||
|
},
|
||||||
|
qname: "custom",
|
||||||
|
id: m3.ID,
|
||||||
|
want: nil,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
"custom": {m4},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"custom": {{Message: m3, Score: time.Now().Unix()}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
h.FlushDB(t, r.client)
|
||||||
|
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||||
|
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||||
|
|
||||||
|
got := r.ArchivePendingTask(tc.qname, tc.id)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("(*RDB).ArchivePendingTask(%q, %v) = %v, want %v",
|
||||||
|
tc.qname, tc.id, got, tc.want)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for qname, want := range tc.wantPending {
|
||||||
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
|
t.Errorf("mismatch found in %q; (-want,+got)\n%s",
|
||||||
|
base.PendingKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for qname, want := range tc.wantArchived {
|
||||||
|
gotDead := h.GetArchivedEntries(t, r.client, qname)
|
||||||
|
if diff := cmp.Diff(want, gotDead, h.SortZSetEntryOpt, zScoreCmpOpt); diff != "" {
|
||||||
|
t.Errorf("mismatch found in %q; (-want,+got)\n%s",
|
||||||
|
base.ArchivedKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestArchiveAllPendingTasks(t *testing.T) {
|
||||||
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
|
m2 := h.NewTaskMessage("task2", nil)
|
||||||
|
m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
|
||||||
|
m4 := h.NewTaskMessageWithQueue("task4", nil, "custom")
|
||||||
|
t1 := time.Now().Add(1 * time.Minute)
|
||||||
|
t2 := time.Now().Add(1 * time.Hour)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
pending map[string][]*base.TaskMessage
|
||||||
|
archived map[string][]base.Z
|
||||||
|
qname string
|
||||||
|
want int64
|
||||||
|
wantPending map[string][]*base.TaskMessage
|
||||||
|
wantArchived map[string][]base.Z
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
qname: "default",
|
||||||
|
want: 2,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]base.Z{
|
||||||
|
"default": {
|
||||||
|
{Message: m1, Score: time.Now().Unix()},
|
||||||
|
{Message: m2, Score: time.Now().Unix()},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {{Message: m2, Score: t2.Unix()}},
|
||||||
|
},
|
||||||
|
qname: "default",
|
||||||
|
want: 1,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]base.Z{
|
||||||
|
"default": {
|
||||||
|
{Message: m1, Score: time.Now().Unix()},
|
||||||
|
{Message: m2, Score: t2.Unix()},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {
|
||||||
|
{Message: m1, Score: t1.Unix()},
|
||||||
|
{Message: m2, Score: t2.Unix()},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
qname: "default",
|
||||||
|
want: 0,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]base.Z{
|
||||||
|
"default": {
|
||||||
|
{Message: m1, Score: t1.Unix()},
|
||||||
|
{Message: m2, Score: t2.Unix()},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
"custom": {m3, m4},
|
||||||
|
},
|
||||||
|
archived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"custom": {},
|
||||||
|
},
|
||||||
|
qname: "custom",
|
||||||
|
want: 2,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
"custom": {},
|
||||||
|
},
|
||||||
|
wantArchived: map[string][]base.Z{
|
||||||
|
"default": {},
|
||||||
|
"custom": {
|
||||||
|
{Message: m3, Score: time.Now().Unix()},
|
||||||
|
{Message: m4, Score: time.Now().Unix()},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
h.FlushDB(t, r.client)
|
||||||
|
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||||
|
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||||
|
|
||||||
|
got, err := r.ArchiveAllPendingTasks(tc.qname)
|
||||||
|
if got != tc.want || err != nil {
|
||||||
|
t.Errorf("(*RDB).KillAllRetryTasks(%q) = %v, %v; want %v, nil",
|
||||||
|
tc.qname, got, err, tc.want)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for qname, want := range tc.wantPending {
|
||||||
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
|
t.Errorf("mismatch found in %q; (-want,+got)\n%s",
|
||||||
|
base.PendingKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for qname, want := range tc.wantArchived {
|
||||||
|
gotDead := h.GetArchivedEntries(t, r.client, qname)
|
||||||
|
if diff := cmp.Diff(want, gotDead, h.SortZSetEntryOpt, zScoreCmpOpt); diff != "" {
|
||||||
|
t.Errorf("mismatch found in %q; (-want,+got)\n%s",
|
||||||
|
base.ArchivedKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestArchiveAllRetryTasks(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
m1 := h.NewTaskMessage("task1", nil)
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
@ -2028,7 +2245,7 @@ func TestKillAllRetryTasks(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKillAllScheduledTasks(t *testing.T) {
|
func TestArchiveAllScheduledTasks(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
m1 := h.NewTaskMessage("task1", nil)
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
@ -2175,7 +2392,7 @@ func TestKillAllScheduledTasks(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDeleteDeadTask(t *testing.T) {
|
func TestDeleteArchivedTask(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
m1 := h.NewTaskMessage("task1", nil)
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
@ -2189,7 +2406,6 @@ func TestDeleteDeadTask(t *testing.T) {
|
|||||||
archived map[string][]base.Z
|
archived map[string][]base.Z
|
||||||
qname string
|
qname string
|
||||||
id uuid.UUID
|
id uuid.UUID
|
||||||
score int64
|
|
||||||
want error
|
want error
|
||||||
wantArchived map[string][]*base.TaskMessage
|
wantArchived map[string][]*base.TaskMessage
|
||||||
}{
|
}{
|
||||||
@ -2202,7 +2418,6 @@ func TestDeleteDeadTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m1.ID,
|
id: m1.ID,
|
||||||
score: t1.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantArchived: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {m2},
|
"default": {m2},
|
||||||
@ -2220,7 +2435,6 @@ func TestDeleteDeadTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "custom",
|
qname: "custom",
|
||||||
id: m3.ID,
|
id: m3.ID,
|
||||||
score: t3.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantArchived: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {m1, m2},
|
"default": {m1, m2},
|
||||||
@ -2235,8 +2449,7 @@ func TestDeleteDeadTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m1.ID,
|
id: uuid.New(),
|
||||||
score: t2.Unix(), // id and score mismatch
|
|
||||||
want: ErrTaskNotFound,
|
want: ErrTaskNotFound,
|
||||||
wantArchived: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {m1, m2},
|
"default": {m1, m2},
|
||||||
@ -2248,7 +2461,6 @@ func TestDeleteDeadTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m1.ID,
|
id: m1.ID,
|
||||||
score: t1.Unix(),
|
|
||||||
want: ErrTaskNotFound,
|
want: ErrTaskNotFound,
|
||||||
wantArchived: map[string][]*base.TaskMessage{
|
wantArchived: map[string][]*base.TaskMessage{
|
||||||
"default": {},
|
"default": {},
|
||||||
@ -2260,9 +2472,9 @@ func TestDeleteDeadTask(t *testing.T) {
|
|||||||
h.FlushDB(t, r.client) // clean up db before each test case
|
h.FlushDB(t, r.client) // clean up db before each test case
|
||||||
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
h.SeedAllArchivedQueues(t, r.client, tc.archived)
|
||||||
|
|
||||||
got := r.DeleteArchivedTask(tc.qname, tc.id, tc.score)
|
got := r.DeleteArchivedTask(tc.qname, tc.id)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("r.DeleteDeadTask(%q, %v, %v) = %v, want %v", tc.qname, tc.id, tc.score, got, tc.want)
|
t.Errorf("r.DeleteArchivedTask(%q, %v) = %v, want %v", tc.qname, tc.id, got, tc.want)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2289,7 +2501,6 @@ func TestDeleteRetryTask(t *testing.T) {
|
|||||||
retry map[string][]base.Z
|
retry map[string][]base.Z
|
||||||
qname string
|
qname string
|
||||||
id uuid.UUID
|
id uuid.UUID
|
||||||
score int64
|
|
||||||
want error
|
want error
|
||||||
wantRetry map[string][]*base.TaskMessage
|
wantRetry map[string][]*base.TaskMessage
|
||||||
}{
|
}{
|
||||||
@ -2302,7 +2513,6 @@ func TestDeleteRetryTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m1.ID,
|
id: m1.ID,
|
||||||
score: t1.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantRetry: map[string][]*base.TaskMessage{
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
"default": {m2},
|
"default": {m2},
|
||||||
@ -2320,7 +2530,6 @@ func TestDeleteRetryTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "custom",
|
qname: "custom",
|
||||||
id: m3.ID,
|
id: m3.ID,
|
||||||
score: t3.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantRetry: map[string][]*base.TaskMessage{
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
"default": {m1, m2},
|
"default": {m1, m2},
|
||||||
@ -2332,8 +2541,7 @@ func TestDeleteRetryTask(t *testing.T) {
|
|||||||
"default": {{Message: m1, Score: t1.Unix()}},
|
"default": {{Message: m1, Score: t1.Unix()}},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m2.ID,
|
id: uuid.New(),
|
||||||
score: t2.Unix(),
|
|
||||||
want: ErrTaskNotFound,
|
want: ErrTaskNotFound,
|
||||||
wantRetry: map[string][]*base.TaskMessage{
|
wantRetry: map[string][]*base.TaskMessage{
|
||||||
"default": {m1},
|
"default": {m1},
|
||||||
@ -2345,9 +2553,9 @@ func TestDeleteRetryTask(t *testing.T) {
|
|||||||
h.FlushDB(t, r.client) // clean up db before each test case
|
h.FlushDB(t, r.client) // clean up db before each test case
|
||||||
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
h.SeedAllRetryQueues(t, r.client, tc.retry)
|
||||||
|
|
||||||
got := r.DeleteRetryTask(tc.qname, tc.id, tc.score)
|
got := r.DeleteRetryTask(tc.qname, tc.id)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("r.DeleteRetryTask(%q, %v, %v) = %v, want %v", tc.qname, tc.id, tc.score, got, tc.want)
|
t.Errorf("r.DeleteRetryTask(%q, %v) = %v, want %v", tc.qname, tc.id, got, tc.want)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2374,7 +2582,6 @@ func TestDeleteScheduledTask(t *testing.T) {
|
|||||||
scheduled map[string][]base.Z
|
scheduled map[string][]base.Z
|
||||||
qname string
|
qname string
|
||||||
id uuid.UUID
|
id uuid.UUID
|
||||||
score int64
|
|
||||||
want error
|
want error
|
||||||
wantScheduled map[string][]*base.TaskMessage
|
wantScheduled map[string][]*base.TaskMessage
|
||||||
}{
|
}{
|
||||||
@ -2387,7 +2594,6 @@ func TestDeleteScheduledTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m1.ID,
|
id: m1.ID,
|
||||||
score: t1.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantScheduled: map[string][]*base.TaskMessage{
|
wantScheduled: map[string][]*base.TaskMessage{
|
||||||
"default": {m2},
|
"default": {m2},
|
||||||
@ -2405,7 +2611,6 @@ func TestDeleteScheduledTask(t *testing.T) {
|
|||||||
},
|
},
|
||||||
qname: "custom",
|
qname: "custom",
|
||||||
id: m3.ID,
|
id: m3.ID,
|
||||||
score: t3.Unix(),
|
|
||||||
want: nil,
|
want: nil,
|
||||||
wantScheduled: map[string][]*base.TaskMessage{
|
wantScheduled: map[string][]*base.TaskMessage{
|
||||||
"default": {m1, m2},
|
"default": {m1, m2},
|
||||||
@ -2417,8 +2622,7 @@ func TestDeleteScheduledTask(t *testing.T) {
|
|||||||
"default": {{Message: m1, Score: t1.Unix()}},
|
"default": {{Message: m1, Score: t1.Unix()}},
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m2.ID,
|
id: uuid.New(),
|
||||||
score: t2.Unix(),
|
|
||||||
want: ErrTaskNotFound,
|
want: ErrTaskNotFound,
|
||||||
wantScheduled: map[string][]*base.TaskMessage{
|
wantScheduled: map[string][]*base.TaskMessage{
|
||||||
"default": {m1},
|
"default": {m1},
|
||||||
@ -2430,9 +2634,9 @@ func TestDeleteScheduledTask(t *testing.T) {
|
|||||||
h.FlushDB(t, r.client) // clean up db before each test case
|
h.FlushDB(t, r.client) // clean up db before each test case
|
||||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
||||||
|
|
||||||
got := r.DeleteScheduledTask(tc.qname, tc.id, tc.score)
|
got := r.DeleteScheduledTask(tc.qname, tc.id)
|
||||||
if got != tc.want {
|
if got != tc.want {
|
||||||
t.Errorf("r.DeleteScheduledTask(%q, %v, %v) = %v, want %v", tc.qname, tc.id, tc.score, got, tc.want)
|
t.Errorf("r.DeleteScheduledTask(%q, %v) = %v, want %v", tc.qname, tc.id, got, tc.want)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2445,67 +2649,76 @@ func TestDeleteScheduledTask(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDeleteUniqueTask(t *testing.T) {
|
func TestDeletePendingTask(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
m1 := &base.TaskMessage{
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
ID: uuid.New(),
|
m2 := h.NewTaskMessage("task2", nil)
|
||||||
Type: "reindex",
|
m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
|
||||||
Payload: nil,
|
|
||||||
Timeout: 1800,
|
|
||||||
Deadline: 0,
|
|
||||||
UniqueKey: "asynq:{default}:unique:reindex:nil",
|
|
||||||
Queue: "default",
|
|
||||||
}
|
|
||||||
t1 := time.Now().Add(5 * time.Minute)
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
scheduled map[string][]base.Z
|
pending map[string][]*base.TaskMessage
|
||||||
qname string
|
qname string
|
||||||
id uuid.UUID
|
id uuid.UUID
|
||||||
score int64
|
want error
|
||||||
uniqueKey string
|
wantPending map[string][]*base.TaskMessage
|
||||||
wantScheduled map[string][]*base.TaskMessage
|
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
scheduled: map[string][]base.Z{
|
pending: map[string][]*base.TaskMessage{
|
||||||
"default": {
|
"default": {m1, m2},
|
||||||
{Message: m1, Score: t1.Unix()},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
qname: "default",
|
qname: "default",
|
||||||
id: m1.ID,
|
id: m1.ID,
|
||||||
score: t1.Unix(),
|
want: nil,
|
||||||
uniqueKey: m1.UniqueKey,
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
wantScheduled: map[string][]*base.TaskMessage{
|
"default": {m2},
|
||||||
"default": {},
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
"custom": {m3},
|
||||||
|
},
|
||||||
|
qname: "custom",
|
||||||
|
id: m3.ID,
|
||||||
|
want: nil,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
"custom": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
},
|
||||||
|
qname: "default",
|
||||||
|
id: uuid.New(),
|
||||||
|
want: ErrTaskNotFound,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
h.FlushDB(t, r.client) // clean up db before each test case
|
h.FlushDB(t, r.client)
|
||||||
h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
|
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||||
if err := r.client.SetNX(tc.uniqueKey, tc.id.String(), time.Minute).Err(); err != nil {
|
|
||||||
t.Fatalf("Could not set unique lock in redis: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := r.DeleteScheduledTask(tc.qname, tc.id, tc.score); err != nil {
|
got := r.DeletePendingTask(tc.qname, tc.id)
|
||||||
t.Errorf("r.DeleteScheduledTask(%q, %v, %v) returned error: %v", tc.qname, tc.id, tc.score, err)
|
if got != tc.want {
|
||||||
|
t.Errorf("r.DeletePendingTask(%q, %v) = %v, want %v", tc.qname, tc.id, got, tc.want)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for qname, want := range tc.wantScheduled {
|
for qname, want := range tc.wantPending {
|
||||||
gotScheduled := h.GetScheduledMessages(t, r.client, qname)
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.ScheduledKey(qname), diff)
|
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.PendingKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if r.client.Exists(tc.uniqueKey).Val() != 0 {
|
|
||||||
t.Errorf("Uniqueness lock %q still exists", tc.uniqueKey)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDeleteAllArchivedTasks(t *testing.T) {
|
func TestDeleteAllArchivedTasks(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
@ -2775,6 +2988,63 @@ func TestDeleteAllScheduledTasks(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDeleteAllPendingTasks(t *testing.T) {
|
||||||
|
r := setup(t)
|
||||||
|
defer r.Close()
|
||||||
|
m1 := h.NewTaskMessage("task1", nil)
|
||||||
|
m2 := h.NewTaskMessage("task2", nil)
|
||||||
|
m3 := h.NewTaskMessageWithQueue("task3", nil, "custom")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
pending map[string][]*base.TaskMessage
|
||||||
|
qname string
|
||||||
|
want int64
|
||||||
|
wantPending map[string][]*base.TaskMessage
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {m1, m2},
|
||||||
|
"custom": {m3},
|
||||||
|
},
|
||||||
|
qname: "default",
|
||||||
|
want: 2,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"default": {},
|
||||||
|
"custom": {m3},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pending: map[string][]*base.TaskMessage{
|
||||||
|
"custom": {},
|
||||||
|
},
|
||||||
|
qname: "custom",
|
||||||
|
want: 0,
|
||||||
|
wantPending: map[string][]*base.TaskMessage{
|
||||||
|
"custom": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
h.FlushDB(t, r.client) // clean up db before each test case
|
||||||
|
h.SeedAllPendingQueues(t, r.client, tc.pending)
|
||||||
|
|
||||||
|
got, err := r.DeleteAllPendingTasks(tc.qname)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("r.DeleteAllPendingTasks(%q) returned error: %v", tc.qname, err)
|
||||||
|
}
|
||||||
|
if got != tc.want {
|
||||||
|
t.Errorf("r.DeleteAllPendingTasks(%q) = %d, nil, want %d, nil", tc.qname, got, tc.want)
|
||||||
|
}
|
||||||
|
for qname, want := range tc.wantPending {
|
||||||
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
|
t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.PendingKey(qname), diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestRemoveQueue(t *testing.T) {
|
func TestRemoveQueue(t *testing.T) {
|
||||||
r := setup(t)
|
r := setup(t)
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
@ -2861,7 +3131,7 @@ func TestRemoveQueue(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
keys := []string{
|
keys := []string{
|
||||||
base.QueueKey(tc.qname),
|
base.PendingKey(tc.qname),
|
||||||
base.ActiveKey(tc.qname),
|
base.ActiveKey(tc.qname),
|
||||||
base.DeadlinesKey(tc.qname),
|
base.DeadlinesKey(tc.qname),
|
||||||
base.ScheduledKey(tc.qname),
|
base.ScheduledKey(tc.qname),
|
||||||
@ -2873,6 +3143,10 @@ func TestRemoveQueue(t *testing.T) {
|
|||||||
t.Errorf("key %q still exists", key)
|
t.Errorf("key %q still exists", key)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if n := len(r.client.Keys(base.TaskKeyPrefix(tc.qname) + "*").Val()); n != 0 {
|
||||||
|
t.Errorf("%d keys still exists for tasks", n)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2990,7 +3264,7 @@ func TestRemoveQueueError(t *testing.T) {
|
|||||||
for qname, want := range tc.pending {
|
for qname, want := range tc.pending {
|
||||||
gotPending := h.GetPendingMessages(t, r.client, qname)
|
gotPending := h.GetPendingMessages(t, r.client, qname)
|
||||||
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
|
||||||
t.Errorf("%s;mismatch found in %q; (-want,+got):\n%s", tc.desc, base.QueueKey(qname), diff)
|
t.Errorf("%s;mismatch found in %q; (-want,+got):\n%s", tc.desc, base.PendingKey(qname), diff)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for qname, want := range tc.inProgress {
|
for qname, want := range tc.inProgress {
|
||||||
|
@ -6,10 +6,8 @@
|
|||||||
package rdb
|
package rdb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-redis/redis/v7"
|
"github.com/go-redis/redis/v7"
|
||||||
@ -50,7 +48,19 @@ func (r *RDB) Ping() error {
|
|||||||
return r.client.Ping().Err()
|
return r.client.Ping().Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Enqueue inserts the given task to the tail of the queue.
|
// KEYS[1] -> asynq:{<qname>}:t:<task_id>
|
||||||
|
// KEYS[2] -> asynq:{<qname>}:pending
|
||||||
|
// ARGV[1] -> task message data
|
||||||
|
// ARGV[2] -> task ID
|
||||||
|
// ARGV[3] -> task timeout in seconds (0 if no timeout)
|
||||||
|
// ARGV[4] -> task deadline in unix time (0 if no deadline)
|
||||||
|
var enqueueCmd = redis.NewScript(`
|
||||||
|
redis.call("HSET", KEYS[1], "msg", ARGV[1], "timeout", ARGV[3], "deadline", ARGV[4])
|
||||||
|
redis.call("LPUSH", KEYS[2], ARGV[2])
|
||||||
|
return 1
|
||||||
|
`)
|
||||||
|
|
||||||
|
// Enqueue adds the given task to the pending list of the queue.
|
||||||
func (r *RDB) Enqueue(msg *base.TaskMessage) error {
|
func (r *RDB) Enqueue(msg *base.TaskMessage) error {
|
||||||
encoded, err := base.EncodeMessage(msg)
|
encoded, err := base.EncodeMessage(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -59,21 +69,34 @@ func (r *RDB) Enqueue(msg *base.TaskMessage) error {
|
|||||||
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
key := base.QueueKey(msg.Queue)
|
keys := []string{
|
||||||
return r.client.LPush(key, encoded).Err()
|
base.TaskKey(msg.Queue, msg.ID.String()),
|
||||||
|
base.PendingKey(msg.Queue),
|
||||||
|
}
|
||||||
|
argv := []interface{}{
|
||||||
|
encoded,
|
||||||
|
msg.ID.String(),
|
||||||
|
msg.Timeout,
|
||||||
|
msg.Deadline,
|
||||||
|
}
|
||||||
|
return enqueueCmd.Run(r.client, keys, argv...).Err()
|
||||||
}
|
}
|
||||||
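With this change a task's payload no longer travels through the pending LIST: the list holds only task IDs, and the encoded message plus its timeout/deadline live in the per-task HASH. A minimal sketch of inspecting that layout with a go-redis v7 client; the queue name and key literals below follow the comments above and are illustrative only.

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// The pending list now stores task IDs only.
	ids, err := rdb.LRange("asynq:{default}:pending", 0, -1).Result()
	if err != nil {
		panic(err)
	}

	// Each ID maps to a HASH holding the encoded message and its timeout/deadline.
	for _, id := range ids {
		fields, err := rdb.HGetAll("asynq:{default}:t:" + id).Result()
		if err != nil {
			panic(err)
		}
		fmt.Printf("task %s: timeout=%s deadline=%s msg=%d bytes\n",
			id, fields["timeout"], fields["deadline"], len(fields["msg"]))
	}
}
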
|
|
||||||
// KEYS[1] -> unique key
|
// KEYS[1] -> unique key
|
||||||
// KEYS[2] -> asynq:{<qname>}
|
// KEYS[2] -> asynq:{<qname>}:t:<taskid>
|
||||||
|
// KEYS[3] -> asynq:{<qname>}:pending
|
||||||
// ARGV[1] -> task ID
|
// ARGV[1] -> task ID
|
||||||
// ARGV[2] -> uniqueness lock TTL
|
// ARGV[2] -> uniqueness lock TTL
|
||||||
// ARGV[3] -> task message data
|
// ARGV[3] -> task message data
|
||||||
|
// ARGV[4] -> task timeout in seconds (0 if no timeout)
|
||||||
|
// ARGV[5] -> task deadline in unix time (0 if no deadline)
|
||||||
var enqueueUniqueCmd = redis.NewScript(`
|
var enqueueUniqueCmd = redis.NewScript(`
|
||||||
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
|
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
|
||||||
if not ok then
|
if not ok then
|
||||||
return 0
|
return 0
|
||||||
end
|
end
|
||||||
redis.call("LPUSH", KEYS[2], ARGV[3])
|
redis.call("HSET", KEYS[2], "msg", ARGV[3], "timeout", ARGV[4], "deadline", ARGV[5])
|
||||||
|
redis.call("LPUSH", KEYS[3], ARGV[1])
|
||||||
return 1
|
return 1
|
||||||
`)
|
`)
|
||||||
|
|
||||||
@ -87,9 +110,19 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
|
|||||||
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
res, err := enqueueUniqueCmd.Run(r.client,
|
keys := []string{
|
||||||
[]string{msg.UniqueKey, base.QueueKey(msg.Queue)},
|
msg.UniqueKey,
|
||||||
msg.ID.String(), int(ttl.Seconds()), encoded).Result()
|
base.TaskKey(msg.Queue, msg.ID.String()),
|
||||||
|
base.PendingKey(msg.Queue),
|
||||||
|
}
|
||||||
|
argv := []interface{}{
|
||||||
|
msg.ID.String(),
|
||||||
|
int(ttl.Seconds()),
|
||||||
|
encoded,
|
||||||
|
msg.Timeout,
|
||||||
|
msg.Deadline,
|
||||||
|
}
|
||||||
|
res, err := enqueueUniqueCmd.Run(r.client, keys, argv...).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
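The uniqueness lock itself is unchanged in spirit: a plain string key set with NX and a TTL, holding the task ID so that only the owning task can release it later (see doneUniqueCmd below). A rough sketch of that behaviour with a go-redis v7 client; the key and IDs are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis/v7"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	uniqueKey := "asynq:{default}:unique:reindex:nil" // illustrative
	taskID := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"  // illustrative

	// First enqueue acquires the lock (SET key id NX EX ttl).
	ok, err := rdb.SetNX(uniqueKey, taskID, 10*time.Minute).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("lock acquired:", ok) // true

	// A second task with the same unique key inside the TTL cannot acquire it,
	// which is what makes enqueueUniqueCmd return 0 for a duplicate.
	ok, _ = rdb.SetNX(uniqueKey, "some-other-task-id", 10*time.Minute).Result()
	fmt.Println("second acquire:", ok) // false
}
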
@ -108,21 +141,22 @@ func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
|
|||||||
// Dequeue skips a queue if the queue is paused.
|
// Dequeue skips a queue if the queue is paused.
|
||||||
// If all queues are empty, ErrNoProcessableTask error is returned.
|
// If all queues are empty, ErrNoProcessableTask error is returned.
|
||||||
func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Time, err error) {
|
func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Time, err error) {
|
||||||
data, d, err := r.dequeue(qnames...)
|
encoded, d, err := r.dequeue(qnames...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, time.Time{}, err
|
return nil, time.Time{}, err
|
||||||
}
|
}
|
||||||
if msg, err = base.DecodeMessage(data); err != nil {
|
if msg, err = base.DecodeMessage([]byte(encoded)); err != nil {
|
||||||
return nil, time.Time{}, err
|
return nil, time.Time{}, err
|
||||||
}
|
}
|
||||||
return msg, time.Unix(d, 0), nil
|
return msg, time.Unix(d, 0), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> asynq:{<qname>}
|
// KEYS[1] -> asynq:{<qname>}:pending
|
||||||
// KEYS[2] -> asynq:{<qname>}:paused
|
// KEYS[2] -> asynq:{<qname>}:paused
|
||||||
// KEYS[3] -> asynq:{<qname>}:active
|
// KEYS[3] -> asynq:{<qname>}:active
|
||||||
// KEYS[4] -> asynq:{<qname>}:deadlines
|
// KEYS[4] -> asynq:{<qname>}:deadlines
|
||||||
// ARGV[1] -> current time in Unix time
|
// ARGV[1] -> current time in Unix time
|
||||||
|
// ARGV[2] -> task key prefix
|
||||||
//
|
//
|
||||||
// dequeueCmd checks whether a queue is paused first, before
|
// dequeueCmd checks whether a queue is paused first, before
|
||||||
// calling RPOPLPUSH to pop a task from the queue.
|
// calling RPOPLPUSH to pop a task from the queue.
|
||||||
@ -130,11 +164,13 @@ func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Ti
|
|||||||
// and inserts the task with deadlines set.
|
// and inserts the task with deadlines set.
|
||||||
var dequeueCmd = redis.NewScript(`
|
var dequeueCmd = redis.NewScript(`
|
||||||
if redis.call("EXISTS", KEYS[2]) == 0 then
|
if redis.call("EXISTS", KEYS[2]) == 0 then
|
||||||
local msg = redis.call("RPOPLPUSH", KEYS[1], KEYS[3])
|
local id = redis.call("RPOPLPUSH", KEYS[1], KEYS[3])
|
||||||
if msg then
|
if id then
|
||||||
local decoded = cjson.decode(msg)
|
local key = ARGV[2] .. id
|
||||||
local timeout = decoded["Timeout"]
|
local data = redis.call("HMGET", key, "msg", "timeout", "deadline")
|
||||||
local deadline = decoded["Deadline"]
|
local msg = data[1]
|
||||||
|
local timeout = tonumber(data[2])
|
||||||
|
local deadline = tonumber(data[3])
|
||||||
local score
|
local score
|
||||||
if timeout ~= 0 and deadline ~= 0 then
|
if timeout ~= 0 and deadline ~= 0 then
|
||||||
score = math.min(ARGV[1]+timeout, deadline)
|
score = math.min(ARGV[1]+timeout, deadline)
|
||||||
@ -145,21 +181,25 @@ if redis.call("EXISTS", KEYS[2]) == 0 then
|
|||||||
else
|
else
|
||||||
return redis.error_reply("asynq internal error: both timeout and deadline are not set")
|
return redis.error_reply("asynq internal error: both timeout and deadline are not set")
|
||||||
end
|
end
|
||||||
redis.call("ZADD", KEYS[4], score, msg)
|
redis.call("ZADD", KEYS[4], score, id)
|
||||||
return {msg, score}
|
return {msg, score}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
return nil`)
|
return nil`)
|
||||||
|
|
||||||
func (r *RDB) dequeue(qnames ...string) (msgjson string, deadline int64, err error) {
|
func (r *RDB) dequeue(qnames ...string) (encoded string, deadline int64, err error) {
|
||||||
for _, qname := range qnames {
|
for _, qname := range qnames {
|
||||||
keys := []string{
|
keys := []string{
|
||||||
base.QueueKey(qname),
|
base.PendingKey(qname),
|
||||||
base.PausedKey(qname),
|
base.PausedKey(qname),
|
||||||
base.ActiveKey(qname),
|
base.ActiveKey(qname),
|
||||||
base.DeadlinesKey(qname),
|
base.DeadlinesKey(qname),
|
||||||
}
|
}
|
||||||
res, err := dequeueCmd.Run(r.client, keys, time.Now().Unix()).Result()
|
argv := []interface{}{
|
||||||
|
time.Now().Unix(),
|
||||||
|
base.TaskKeyPrefix(qname),
|
||||||
|
}
|
||||||
|
res, err := dequeueCmd.Run(r.client, keys, argv...).Result()
|
||||||
if err == redis.Nil {
|
if err == redis.Nil {
|
||||||
continue
|
continue
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
@ -172,21 +212,22 @@ func (r *RDB) dequeue(qnames ...string) (msgjson string, deadline int64, err err
|
|||||||
if len(data) != 2 {
|
if len(data) != 2 {
|
||||||
return "", 0, fmt.Errorf("asynq: internal error: dequeue command returned %d values", len(data))
|
return "", 0, fmt.Errorf("asynq: internal error: dequeue command returned %d values", len(data))
|
||||||
}
|
}
|
||||||
if msgjson, err = cast.ToStringE(data[0]); err != nil {
|
if encoded, err = cast.ToStringE(data[0]); err != nil {
|
||||||
return "", 0, err
|
return "", 0, err
|
||||||
}
|
}
|
||||||
if deadline, err = cast.ToInt64E(data[1]); err != nil {
|
if deadline, err = cast.ToInt64E(data[1]); err != nil {
|
||||||
return "", 0, err
|
return "", 0, err
|
||||||
}
|
}
|
||||||
return msgjson, deadline, nil
|
return encoded, deadline, nil
|
||||||
}
|
}
|
||||||
return "", 0, ErrNoProcessableTask
|
return "", 0, ErrNoProcessableTask
|
||||||
}
|
}
|
||||||
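The score written into the deadlines ZSET comes from the timeout and deadline fields fetched with HMGET. Only the both-set branch is visible in the hunk above; the sketch below assumes the elided branches fall back to whichever value is non-zero (all values in Unix seconds, 0 meaning unset).

package sketch

import "errors"

// deadlineScore mirrors the score logic in dequeueCmd: min(now+timeout, deadline)
// when both are set, otherwise the one that is non-zero. The single-value
// branches are assumed here, as they are elided from the hunk above.
func deadlineScore(now, timeout, deadline int64) (int64, error) {
	switch {
	case timeout != 0 && deadline != 0:
		if s := now + timeout; s < deadline {
			return s, nil
		}
		return deadline, nil
	case timeout != 0:
		return now + timeout, nil
	case deadline != 0:
		return deadline, nil
	default:
		return 0, errors.New("asynq internal error: both timeout and deadline are not set")
	}
}
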
|
|
||||||
// KEYS[1] -> asynq:{<qname>}:active
|
// KEYS[1] -> asynq:{<qname>}:active
|
||||||
// KEYS[2] -> asynq:{<qname>}:deadlines
|
// KEYS[2] -> asynq:{<qname>}:deadlines
|
||||||
// KEYS[3] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
|
// KEYS[3] -> asynq:{<qname>}:t:<task_id>
|
||||||
// ARGV[1] -> base.TaskMessage value
|
// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
|
||||||
|
// ARGV[1] -> task ID
|
||||||
// ARGV[2] -> stats expiration timestamp
|
// ARGV[2] -> stats expiration timestamp
|
||||||
var doneCmd = redis.NewScript(`
|
var doneCmd = redis.NewScript(`
|
||||||
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
|
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
|
||||||
@ -195,20 +236,23 @@ end
|
|||||||
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
|
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
|
||||||
return redis.error_reply("NOT FOUND")
|
return redis.error_reply("NOT FOUND")
|
||||||
end
|
end
|
||||||
local n = redis.call("INCR", KEYS[3])
|
if redis.call("DEL", KEYS[3]) == 0 then
|
||||||
|
return redis.error_reply("NOT FOUND")
|
||||||
|
end
|
||||||
|
local n = redis.call("INCR", KEYS[4])
|
||||||
if tonumber(n) == 1 then
|
if tonumber(n) == 1 then
|
||||||
redis.call("EXPIREAT", KEYS[3], ARGV[2])
|
redis.call("EXPIREAT", KEYS[4], ARGV[2])
|
||||||
end
|
end
|
||||||
return redis.status_reply("OK")
|
return redis.status_reply("OK")
|
||||||
`)
|
`)
|
||||||
|
|
||||||
// KEYS[1] -> asynq:{<qname>}:active
|
// KEYS[1] -> asynq:{<qname>}:active
|
||||||
// KEYS[2] -> asynq:{<qname>}:deadlines
|
// KEYS[2] -> asynq:{<qname>}:deadlines
|
||||||
// KEYS[3] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
|
// KEYS[3] -> asynq:{<qname>}:t:<task_id>
|
||||||
// KEYS[4] -> unique key
|
// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
|
||||||
// ARGV[1] -> base.TaskMessage value
|
// KEYS[5] -> unique key
|
||||||
|
// ARGV[1] -> task ID
|
||||||
// ARGV[2] -> stats expiration timestamp
|
// ARGV[2] -> stats expiration timestamp
|
||||||
// ARGV[3] -> task ID
|
|
||||||
var doneUniqueCmd = redis.NewScript(`
|
var doneUniqueCmd = redis.NewScript(`
|
||||||
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
|
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
|
||||||
return redis.error_reply("NOT FOUND")
|
return redis.error_reply("NOT FOUND")
|
||||||
@ -216,12 +260,15 @@ end
|
|||||||
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
|
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
|
||||||
return redis.error_reply("NOT FOUND")
|
return redis.error_reply("NOT FOUND")
|
||||||
end
|
end
|
||||||
local n = redis.call("INCR", KEYS[3])
|
if redis.call("DEL", KEYS[3]) == 0 then
|
||||||
if tonumber(n) == 1 then
|
return redis.error_reply("NOT FOUND")
|
||||||
redis.call("EXPIREAT", KEYS[3], ARGV[2])
|
|
||||||
end
|
end
|
||||||
if redis.call("GET", KEYS[4]) == ARGV[3] then
|
local n = redis.call("INCR", KEYS[4])
|
||||||
redis.call("DEL", KEYS[4])
|
if tonumber(n) == 1 then
|
||||||
|
redis.call("EXPIREAT", KEYS[4], ARGV[2])
|
||||||
|
end
|
||||||
|
if redis.call("GET", KEYS[5]) == ARGV[1] then
|
||||||
|
redis.call("DEL", KEYS[5])
|
||||||
end
|
end
|
||||||
return redis.status_reply("OK")
|
return redis.status_reply("OK")
|
||||||
`)
|
`)
|
||||||
@ -229,30 +276,29 @@ return redis.status_reply("OK")
|
|||||||
// Done removes the task from active queue to mark the task as done.
|
// Done removes the task from active queue to mark the task as done.
|
||||||
// It removes a uniqueness lock acquired by the task, if any.
|
// It removes a uniqueness lock acquired by the task, if any.
|
||||||
func (r *RDB) Done(msg *base.TaskMessage) error {
|
func (r *RDB) Done(msg *base.TaskMessage) error {
|
||||||
encoded, err := base.EncodeMessage(msg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
expireAt := now.Add(statsTTL)
|
expireAt := now.Add(statsTTL)
|
||||||
keys := []string{
|
keys := []string{
|
||||||
base.ActiveKey(msg.Queue),
|
base.ActiveKey(msg.Queue),
|
||||||
base.DeadlinesKey(msg.Queue),
|
base.DeadlinesKey(msg.Queue),
|
||||||
|
base.TaskKey(msg.Queue, msg.ID.String()),
|
||||||
base.ProcessedKey(msg.Queue, now),
|
base.ProcessedKey(msg.Queue, now),
|
||||||
}
|
}
|
||||||
args := []interface{}{encoded, expireAt.Unix()}
|
argv := []interface{}{
|
||||||
|
msg.ID.String(),
|
||||||
|
expireAt.Unix(),
|
||||||
|
}
|
||||||
if len(msg.UniqueKey) > 0 {
|
if len(msg.UniqueKey) > 0 {
|
||||||
keys = append(keys, msg.UniqueKey)
|
keys = append(keys, msg.UniqueKey)
|
||||||
args = append(args, msg.ID.String())
|
return doneUniqueCmd.Run(r.client, keys, argv...).Err()
|
||||||
return doneUniqueCmd.Run(r.client, keys, args...).Err()
|
|
||||||
}
|
}
|
||||||
return doneCmd.Run(r.client, keys, args...).Err()
|
return doneCmd.Run(r.client, keys, argv...).Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> asynq:{<qname>}:active
|
// KEYS[1] -> asynq:{<qname>}:active
|
||||||
// KEYS[2] -> asynq:{<qname>}:deadlines
|
// KEYS[2] -> asynq:{<qname>}:deadlines
|
||||||
// KEYS[3] -> asynq:{<qname>}
|
// KEYS[3] -> asynq:{<qname>}:pending
|
||||||
// ARGV[1] -> base.TaskMessage value
|
// ARGV[1] -> task ID
|
||||||
// Note: Use RPUSH to push to the head of the queue.
|
// Note: Use RPUSH to push to the head of the queue.
|
||||||
var requeueCmd = redis.NewScript(`
|
var requeueCmd = redis.NewScript(`
|
||||||
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
|
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
|
||||||
@ -266,16 +312,25 @@ return redis.status_reply("OK")`)
|
|||||||
|
|
||||||
// Requeue moves the task from active queue to the specified queue.
|
// Requeue moves the task from active queue to the specified queue.
|
||||||
func (r *RDB) Requeue(msg *base.TaskMessage) error {
|
func (r *RDB) Requeue(msg *base.TaskMessage) error {
|
||||||
encoded, err := base.EncodeMessage(msg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return requeueCmd.Run(r.client,
|
return requeueCmd.Run(r.client,
|
||||||
[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.QueueKey(msg.Queue)},
|
[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.PendingKey(msg.Queue)},
|
||||||
encoded).Err()
|
msg.ID.String()).Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Schedule adds the task to the backlog queue to be processed in the future.
|
// KEYS[1] -> asynq:{<qname>}:t:<task_id>
|
||||||
|
// KEYS[2] -> asynq:{<qname>}:scheduled
|
||||||
|
// ARGV[1] -> task message data
|
||||||
|
// ARGV[2] -> process_at time in Unix time
|
||||||
|
// ARGV[3] -> task ID
|
||||||
|
// ARGV[4] -> task timeout in seconds (0 if no timeout)
|
||||||
|
// ARGV[5] -> task deadline in unix time (0 if no deadline)
|
||||||
|
var scheduleCmd = redis.NewScript(`
|
||||||
|
redis.call("HSET", KEYS[1], "msg", ARGV[1], "timeout", ARGV[4], "deadline", ARGV[5])
|
||||||
|
redis.call("ZADD", KEYS[2], ARGV[2], ARGV[3])
|
||||||
|
return 1
|
||||||
|
`)
|
||||||
|
|
||||||
|
// Schedule adds the task to the scheduled set to be processed in the future.
|
||||||
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
|
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
|
||||||
encoded, err := base.EncodeMessage(msg)
|
encoded, err := base.EncodeMessage(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -284,22 +339,36 @@ func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
|
|||||||
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
score := float64(processAt.Unix())
|
keys := []string{
|
||||||
return r.client.ZAdd(base.ScheduledKey(msg.Queue), &redis.Z{Score: score, Member: encoded}).Err()
|
base.TaskKey(msg.Queue, msg.ID.String()),
|
||||||
|
base.ScheduledKey(msg.Queue),
|
||||||
|
}
|
||||||
|
argv := []interface{}{
|
||||||
|
encoded,
|
||||||
|
processAt.Unix(),
|
||||||
|
msg.ID.String(),
|
||||||
|
msg.Timeout,
|
||||||
|
msg.Deadline,
|
||||||
|
}
|
||||||
|
return scheduleCmd.Run(r.client, keys, argv...).Err()
|
||||||
}
|
}
|
||||||
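Scheduled tasks get the same split: the per-task HASH keeps the message while the scheduled ZSET scores only task IDs by process_at. A sketch of listing due task IDs and pulling their message bytes with a go-redis v7 client; key names follow the comments above, the queue name is illustrative.

package main

import (
	"fmt"
	"strconv"
	"time"

	"github.com/go-redis/redis/v7"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Task IDs whose process_at score is already in the past.
	now := strconv.FormatInt(time.Now().Unix(), 10)
	ids, err := rdb.ZRangeByScore("asynq:{default}:scheduled", &redis.ZRangeBy{
		Min: "-inf",
		Max: now,
	}).Result()
	if err != nil {
		panic(err)
	}

	// The message bytes still live in the per-task HASH.
	for _, id := range ids {
		msg, err := rdb.HGet("asynq:{default}:t:"+id, "msg").Result()
		if err != nil {
			panic(err)
		}
		fmt.Printf("due task %s (%d bytes)\n", id, len(msg))
	}
}
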
|
|
||||||
// KEYS[1] -> unique key
|
// KEYS[1] -> unique key
|
||||||
// KEYS[2] -> asynq:{<qname>}:scheduled
|
// KEYS[2] -> asynq:{<qname>}:t:<task_id>
|
||||||
|
// KEYS[3] -> asynq:{<qname>}:scheduled
|
||||||
// ARGV[1] -> task ID
|
// ARGV[1] -> task ID
|
||||||
// ARGV[2] -> uniqueness lock TTL
|
// ARGV[2] -> uniqueness lock TTL
|
||||||
// ARGV[3] -> score (process_at timestamp)
|
// ARGV[3] -> score (process_at timestamp)
|
||||||
// ARGV[4] -> task message
|
// ARGV[4] -> task message
|
||||||
|
// ARGV[5] -> task timeout in seconds (0 if no timeout)
|
||||||
|
// ARGV[6] -> task deadline in unix time (0 if no deadline)
|
||||||
var scheduleUniqueCmd = redis.NewScript(`
|
var scheduleUniqueCmd = redis.NewScript(`
|
||||||
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
|
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
|
||||||
if not ok then
|
if not ok then
|
||||||
return 0
|
return 0
|
||||||
end
|
end
|
||||||
redis.call("ZADD", KEYS[2], ARGV[3], ARGV[4])
|
redis.call("HSET", KEYS[2], "msg", ARGV[4], "timeout", ARGV[5], "deadline", ARGV[6])
|
||||||
|
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[1])
|
||||||
return 1
|
return 1
|
||||||
`)
|
`)
|
||||||
|
|
||||||
@ -313,10 +382,20 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
|
|||||||
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
score := float64(processAt.Unix())
|
keys := []string{
|
||||||
res, err := scheduleUniqueCmd.Run(r.client,
|
msg.UniqueKey,
|
||||||
[]string{msg.UniqueKey, base.ScheduledKey(msg.Queue)},
|
base.TaskKey(msg.Queue, msg.ID.String()),
|
||||||
msg.ID.String(), int(ttl.Seconds()), score, encoded).Result()
|
base.ScheduledKey(msg.Queue),
|
||||||
|
}
|
||||||
|
argv := []interface{}{
|
||||||
|
msg.ID.String(),
|
||||||
|
int(ttl.Seconds()),
|
||||||
|
processAt.Unix(),
|
||||||
|
encoded,
|
||||||
|
msg.Timeout,
|
||||||
|
msg.Deadline,
|
||||||
|
}
|
||||||
|
res, err := scheduleUniqueCmd.Run(r.client, keys, argv...).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -330,54 +409,62 @@ func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl tim
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// KEYS[1] -> asynq:{<qname>}:active
|
// KEYS[1] -> asynq:{<qname>}:t:<task_id>
|
||||||
// KEYS[2] -> asynq:{<qname>}:deadlines
|
// KEYS[2] -> asynq:{<qname>}:active
|
||||||
// KEYS[3] -> asynq:{<qname>}:retry
|
// KEYS[3] -> asynq:{<qname>}:deadlines
|
||||||
// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
|
// KEYS[4] -> asynq:{<qname>}:retry
|
||||||
// KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
|
// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
|
||||||
// ARGV[1] -> base.TaskMessage value to remove from base.ActiveQueue queue
|
// KEYS[6] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
|
||||||
// ARGV[2] -> base.TaskMessage value to add to Retry queue
|
// ARGV[1] -> task ID
|
||||||
|
// ARGV[2] -> updated base.TaskMessage value
|
||||||
// ARGV[3] -> retry_at UNIX timestamp
|
// ARGV[3] -> retry_at UNIX timestamp
|
||||||
// ARGV[4] -> stats expiration timestamp
|
// ARGV[4] -> stats expiration timestamp
|
||||||
var retryCmd = redis.NewScript(`
|
var retryCmd = redis.NewScript(`
|
||||||
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
|
if redis.call("LREM", KEYS[2], 0, ARGV[1]) == 0 then
|
||||||
return redis.error_reply("NOT FOUND")
|
return redis.error_reply("NOT FOUND")
|
||||||
end
|
end
|
||||||
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
|
if redis.call("ZREM", KEYS[3], ARGV[1]) == 0 then
|
||||||
return redis.error_reply("NOT FOUND")
|
return redis.error_reply("NOT FOUND")
|
||||||
end
|
end
|
||||||
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[2])
|
redis.call("ZADD", KEYS[4], ARGV[3], ARGV[1])
|
||||||
local n = redis.call("INCR", KEYS[4])
|
redis.call("HSET", KEYS[1], "msg", ARGV[2])
|
||||||
|
local n = redis.call("INCR", KEYS[5])
|
||||||
if tonumber(n) == 1 then
|
if tonumber(n) == 1 then
|
||||||
redis.call("EXPIREAT", KEYS[4], ARGV[4])
|
|
||||||
end
|
|
||||||
local m = redis.call("INCR", KEYS[5])
|
|
||||||
if tonumber(m) == 1 then
|
|
||||||
redis.call("EXPIREAT", KEYS[5], ARGV[4])
|
redis.call("EXPIREAT", KEYS[5], ARGV[4])
|
||||||
end
|
end
|
||||||
|
local m = redis.call("INCR", KEYS[6])
|
||||||
|
if tonumber(m) == 1 then
|
||||||
|
redis.call("EXPIREAT", KEYS[6], ARGV[4])
|
||||||
|
end
|
||||||
return redis.status_reply("OK")`)
|
return redis.status_reply("OK")`)
|
||||||
|
|
||||||
// Retry moves the task from active to retry queue, incrementing retry count
|
// Retry moves the task from active to retry queue, incrementing retry count
|
||||||
// and assigning error message to the task message.
|
// and assigning error message to the task message.
|
||||||
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
|
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
|
||||||
msgToRemove, err := base.EncodeMessage(msg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
modified := *msg
|
modified := *msg
|
||||||
modified.Retried++
|
modified.Retried++
|
||||||
modified.ErrorMsg = errMsg
|
modified.ErrorMsg = errMsg
|
||||||
msgToAdd, err := base.EncodeMessage(&modified)
|
encoded, err := base.EncodeMessage(&modified)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
processedKey := base.ProcessedKey(msg.Queue, now)
|
|
||||||
failedKey := base.FailedKey(msg.Queue, now)
|
|
||||||
expireAt := now.Add(statsTTL)
|
expireAt := now.Add(statsTTL)
|
||||||
return retryCmd.Run(r.client,
|
keys := []string{
|
||||||
[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.RetryKey(msg.Queue), processedKey, failedKey},
|
base.TaskKey(msg.Queue, msg.ID.String()),
|
||||||
msgToRemove, msgToAdd, processAt.Unix(), expireAt.Unix()).Err()
|
base.ActiveKey(msg.Queue),
|
||||||
|
base.DeadlinesKey(msg.Queue),
|
||||||
|
base.RetryKey(msg.Queue),
|
||||||
|
base.ProcessedKey(msg.Queue, now),
|
||||||
|
base.FailedKey(msg.Queue, now),
|
||||||
|
}
|
||||||
|
argv := []interface{}{
|
||||||
|
msg.ID.String(),
|
||||||
|
encoded,
|
||||||
|
processAt.Unix(),
|
||||||
|
expireAt.Unix(),
|
||||||
|
}
|
||||||
|
return retryCmd.Run(r.client, keys, argv...).Err()
|
||||||
}
|
}
|
||||||
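Since retryCmd now rewrites the hash's "msg" field in place instead of re-inserting a full message into the ZSET, the updated retry count and error string can be read back by decoding that one field. A sketch under that assumption; because it imports internal/base it would only compile inside the asynq module itself.

package sketch

import (
	"github.com/go-redis/redis/v7"

	"github.com/hibiken/asynq/internal/base"
)

// lastError reads back the in-place updated message of a retried task and
// returns its retry count and last error message.
func lastError(rdb *redis.Client, qname, taskID string) (int, string, error) {
	encoded, err := rdb.HGet(base.TaskKey(qname, taskID), "msg").Result()
	if err != nil {
		return 0, "", err
	}
	msg, err := base.DecodeMessage([]byte(encoded))
	if err != nil {
		return 0, "", err
	}
	return msg.Retried, msg.ErrorMsg, nil
}
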
|
|
||||||
 const (
@@ -385,68 +472,78 @@ const (
     archivedExpirationInDays = 90 // number of days before an archived task gets deleted permanently
 )

-// KEYS[1] -> asynq:{<qname>}:active
-// KEYS[2] -> asynq:{<qname>}:deadlines
-// KEYS[3] -> asynq:{<qname>}:archived
-// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
-// KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
-// ARGV[1] -> base.TaskMessage value to remove
-// ARGV[2] -> base.TaskMessage value to add
+// KEYS[1] -> asynq:{<qname>}:t:<task_id>
+// KEYS[2] -> asynq:{<qname>}:active
+// KEYS[3] -> asynq:{<qname>}:deadlines
+// KEYS[4] -> asynq:{<qname>}:archived
+// KEYS[5] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
+// KEYS[6] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
+// ARGV[1] -> task ID
+// ARGV[2] -> updated base.TaskMessage value
 // ARGV[3] -> died_at UNIX timestamp
 // ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
 // ARGV[5] -> max number of tasks in archive (e.g., 100)
 // ARGV[6] -> stats expiration timestamp
 var archiveCmd = redis.NewScript(`
-if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
+if redis.call("LREM", KEYS[2], 0, ARGV[1]) == 0 then
   return redis.error_reply("NOT FOUND")
 end
-if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
+if redis.call("ZREM", KEYS[3], ARGV[1]) == 0 then
   return redis.error_reply("NOT FOUND")
 end
-redis.call("ZADD", KEYS[3], ARGV[3], ARGV[2])
-redis.call("ZREMRANGEBYSCORE", KEYS[3], "-inf", ARGV[4])
-redis.call("ZREMRANGEBYRANK", KEYS[3], 0, -ARGV[5])
-local n = redis.call("INCR", KEYS[4])
+redis.call("ZADD", KEYS[4], ARGV[3], ARGV[1])
+redis.call("ZREMRANGEBYSCORE", KEYS[4], "-inf", ARGV[4])
+redis.call("ZREMRANGEBYRANK", KEYS[4], 0, -ARGV[5])
+redis.call("HSET", KEYS[1], "msg", ARGV[2])
+local n = redis.call("INCR", KEYS[5])
 if tonumber(n) == 1 then
-  redis.call("EXPIREAT", KEYS[4], ARGV[6])
-end
-local m = redis.call("INCR", KEYS[5])
-if tonumber(m) == 1 then
   redis.call("EXPIREAT", KEYS[5], ARGV[6])
 end
+local m = redis.call("INCR", KEYS[6])
+if tonumber(m) == 1 then
+  redis.call("EXPIREAT", KEYS[6], ARGV[6])
+end
 return redis.status_reply("OK")`)

 // Archive sends the given task to archive, attaching the error message to the task.
 // It also trims the archive by timestamp and set size.
 func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
-    msgToRemove, err := base.EncodeMessage(msg)
-    if err != nil {
-        return err
-    }
     modified := *msg
     modified.ErrorMsg = errMsg
-    msgToAdd, err := base.EncodeMessage(&modified)
+    encoded, err := base.EncodeMessage(&modified)
     if err != nil {
         return err
     }
     now := time.Now()
-    limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
-    processedKey := base.ProcessedKey(msg.Queue, now)
-    failedKey := base.FailedKey(msg.Queue, now)
+    cutoff := now.AddDate(0, 0, -archivedExpirationInDays)
     expireAt := now.Add(statsTTL)
-    return archiveCmd.Run(r.client,
-        []string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.ArchivedKey(msg.Queue), processedKey, failedKey},
-        msgToRemove, msgToAdd, now.Unix(), limit, maxArchiveSize, expireAt.Unix()).Err()
+    keys := []string{
+        base.TaskKey(msg.Queue, msg.ID.String()),
+        base.ActiveKey(msg.Queue),
+        base.DeadlinesKey(msg.Queue),
+        base.ArchivedKey(msg.Queue),
+        base.ProcessedKey(msg.Queue, now),
+        base.FailedKey(msg.Queue, now),
+    }
+    argv := []interface{}{
+        msg.ID.String(),
+        encoded,
+        now.Unix(),
+        cutoff.Unix(),
+        maxArchiveSize,
+        expireAt.Unix(),
+    }
+    return archiveCmd.Run(r.client, keys, argv...).Err()
 }

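The two ZREMRANGE calls in archiveCmd keep the archive bounded both by age and by size. A standalone sketch of that trimming step, assuming the go-redis v7 API used in this diff; the key name, cutoff, and maximum size here are illustrative only:

package main

import (
	"strconv"
	"time"

	"github.com/go-redis/redis/v7"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	archivedKey := "asynq:{default}:archived" // illustrative key name
	cutoff := time.Now().AddDate(0, 0, -90)   // entries older than 90 days are dropped
	maxSize := int64(100)                     // cap on how many archived entries to keep

	// Trim by age: remove every member whose score (archived-at unix time)
	// is at or below the cutoff.
	client.ZRemRangeByScore(archivedKey, "-inf", strconv.FormatInt(cutoff.Unix(), 10))

	// Trim by size: remove the lowest-ranked (oldest) members, mirroring
	// ZREMRANGEBYRANK key 0 -max in the script above.
	client.ZRemRangeByRank(archivedKey, 0, -maxSize)
}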
-// CheckAndEnqueue checks for scheduled/retry tasks for the given queues
-//and enqueues any tasks that are ready to be processed.
-func (r *RDB) CheckAndEnqueue(qnames ...string) error {
+// ForwardIfReady checks scheduled and retry sets of the given queues
+// and move any tasks that are ready to be processed to the pending set.
+func (r *RDB) ForwardIfReady(qnames ...string) error {
     for _, qname := range qnames {
-        if err := r.forwardAll(base.ScheduledKey(qname), base.QueueKey(qname)); err != nil {
+        if err := r.forwardAll(base.ScheduledKey(qname), base.PendingKey(qname)); err != nil {
             return err
         }
-        if err := r.forwardAll(base.RetryKey(qname), base.QueueKey(qname)); err != nil {
+        if err := r.forwardAll(base.RetryKey(qname), base.PendingKey(qname)); err != nil {
             return err
         }
     }
@@ -458,12 +555,12 @@ func (r *RDB) CheckAndEnqueue(qnames ...string) error {
 // ARGV[1] -> current unix time
 // Note: Script moves tasks up to 100 at a time to keep the runtime of script short.
 var forwardCmd = redis.NewScript(`
-local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, 100)
-for _, msg in ipairs(msgs) do
-  redis.call("LPUSH", KEYS[2], msg)
-  redis.call("ZREM", KEYS[1], msg)
+local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, 100)
+for _, id in ipairs(ids) do
+  redis.call("LPUSH", KEYS[2], id)
+  redis.call("ZREM", KEYS[1], id)
 end
-return table.getn(msgs)`)
+return table.getn(ids)`)

 // forward moves tasks with a score less than the current unix time
 // from the src zset to the dst list. It returns the number of tasks moved.
@@ -489,20 +586,35 @@ func (r *RDB) forwardAll(src, dst string) (err error) {
     return nil
 }

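The body of forwardAll is not shown in this hunk. A plausible sketch of how such a helper can drive the script until the source set is drained; this is an assumption, not the actual implementation, and the forward helper below stands in for the real one described in the comment above:

// forwardAll repeatedly invokes the forward script until it reports that no
// more ready task IDs were moved from src (a zset) to dst (a list).
// Sketch only: the real method signature matches the hunk header above.
func (r *RDB) forwardAll(src, dst string) error {
	for {
		n, err := r.forward(src, dst) // assumed helper wrapping forwardCmd.Run(...)
		if err != nil {
			return err
		}
		if n == 0 {
			return nil
		}
	}
}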
+// KEYS[1] -> asynq:{<qname>}:deadlines
+// ARGV[1] -> deadline in unix time
+// ARGV[2] -> task key prefix
+var listDeadlineExceededCmd = redis.NewScript(`
+local res = {}
+local ids = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1])
+for _, id in ipairs(ids) do
+  local key = ARGV[2] .. id
+  table.insert(res, redis.call("HGET", key, "msg"))
+end
+return res
+`)

 // ListDeadlineExceeded returns a list of task messages that have exceeded the deadline from the given queues.
 func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
     var msgs []*base.TaskMessage
-    opt := &redis.ZRangeBy{
-        Min: "-inf",
-        Max: strconv.FormatInt(deadline.Unix(), 10),
-    }
     for _, qname := range qnames {
-        res, err := r.client.ZRangeByScore(base.DeadlinesKey(qname), opt).Result()
+        res, err := listDeadlineExceededCmd.Run(r.client,
+            []string{base.DeadlinesKey(qname)},
+            deadline.Unix(), base.TaskKeyPrefix(qname)).Result()
         if err != nil {
             return nil, err
         }
-        for _, s := range res {
-            msg, err := base.DecodeMessage(s)
+        data, err := cast.ToStringSliceE(res)
+        if err != nil {
+            return nil, err
+        }
+        for _, s := range data {
+            msg, err := base.DecodeMessage([]byte(s))
             if err != nil {
                 return nil, err
             }
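For reference, the prefix handed to the script as ARGV[2] is expected to line up with the per-task hash key layout given in the archiveCmd key comments (asynq:{<qname>}:t:<task_id>). An illustrative sketch of that relationship; these helpers are stand-ins for base.TaskKeyPrefix and base.TaskKey, not the real base package functions:

// Inside the Lua script the same key is rebuilt as ARGV[2] .. id before its
// "msg" field is read back with HGET.
func taskKeyPrefix(qname string) string { return "asynq:{" + qname + "}:t:" }

func taskKey(qname, id string) string { return taskKeyPrefix(qname) + id }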
@@ -530,14 +642,14 @@ return redis.status_reply("OK")`)

 // WriteServerState writes server state data to redis with expiration set to the value ttl.
 func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
-    bytes, err := json.Marshal(info)
+    bytes, err := base.EncodeServerInfo(info)
     if err != nil {
         return err
     }
     exp := time.Now().Add(ttl).UTC()
     args := []interface{}{ttl.Seconds(), bytes} // args to the lua script
     for _, w := range workers {
-        bytes, err := json.Marshal(w)
+        bytes, err := base.EncodeWorkerInfo(w)
         if err != nil {
             continue // skip bad data
         }
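WriteServerState and the tests further down rely on the base.Encode*/Decode* pairs instead of encoding/json. A minimal round-trip sketch of that contract, assuming base.EncodeServerInfo returns the serialized bytes and base.DecodeServerInfo returns a *base.ServerInfo, as the surrounding code implies; the wire format itself is opaque to callers:

// roundTripServerInfo shows the property the tests below verify with
// cmp.Diff(info, *got): whatever EncodeServerInfo writes, DecodeServerInfo
// must read back into an equal value.
func roundTripServerInfo(info *base.ServerInfo) (*base.ServerInfo, error) {
	data, err := base.EncodeServerInfo(info)
	if err != nil {
		return nil, err
	}
	return base.DecodeServerInfo(data)
}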
@@ -589,7 +701,7 @@ return redis.status_reply("OK")`)
 func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
     args := []interface{}{ttl.Seconds()}
     for _, e := range entries {
-        bytes, err := json.Marshal(e)
+        bytes, err := base.EncodeSchedulerEntry(e)
         if err != nil {
             continue // skip bad data
         }
@@ -644,7 +756,7 @@ const maxEvents = 1000
 // RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
 func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
     key := base.SchedulerHistoryKey(entryID)
-    data, err := json.Marshal(event)
+    data, err := base.EncodeSchedulerEnqueueEvent(event)
     if err != nil {
         return err
     }
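The rest of RecordSchedulerEnqueueEvent is not shown past this hunk. Given the maxEvents constant in the hunk header, a plausible continuation is to push the encoded event onto the per-entry history list and trim it; this is an assumption about the implementation, not code from the diff:

// recordEvent sketches the assumed behavior: keep the newest maxEvents
// encoded events on the history list, letting older ones fall off the end.
func recordEvent(client *redis.Client, key string, data []byte, maxEvents int64) error {
	if err := client.LPush(key, data).Err(); err != nil {
		return err
	}
	return client.LTrim(key, 0, maxEvents-1).Err()
}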
@@ -83,7 +83,7 @@ func TestEnqueue(t *testing.T) {

     gotPending := h.GetPendingMessages(t, r.client, tc.msg.Queue)
     if len(gotPending) != 1 {
-        t.Errorf("%q has length %d, want 1", base.QueueKey(tc.msg.Queue), len(gotPending))
+        t.Errorf("%q has length %d, want 1", base.PendingKey(tc.msg.Queue), len(gotPending))
         continue
     }
     if diff := cmp.Diff(tc.msg, gotPending[0]); diff != "" {
@@ -101,7 +101,7 @@ func TestEnqueueUnique(t *testing.T) {
     m1 := base.TaskMessage{
         ID:        uuid.New(),
         Type:      "email",
-        Payload:   map[string]interface{}{"user_id": 123},
+        Payload:   map[string]interface{}{"user_id": json.Number("123")},
         Queue:     base.DefaultQueueName,
         UniqueKey: base.UniqueKey(base.DefaultQueueName, "email", map[string]interface{}{"user_id": 123}),
     }
@@ -116,13 +116,26 @@ func TestEnqueueUnique(t *testing.T) {
     for _, tc := range tests {
         h.FlushDB(t, r.client) // clean up db before each test case.

+        // Enqueue the first message, should succeed.
         err := r.EnqueueUnique(tc.msg, tc.ttl)
         if err != nil {
             t.Errorf("First message: (*RDB).EnqueueUnique(%v, %v) = %v, want nil",
                 tc.msg, tc.ttl, err)
             continue
         }
+        gotPending := h.GetPendingMessages(t, r.client, tc.msg.Queue)
+        if len(gotPending) != 1 {
+            t.Errorf("%q has length %d, want 1", base.PendingKey(tc.msg.Queue), len(gotPending))
+            continue
+        }
+        if diff := cmp.Diff(tc.msg, gotPending[0]); diff != "" {
+            t.Errorf("persisted data differed from the original input (-want, +got)\n%s", diff)
+        }
+        if !r.client.SIsMember(base.AllQueues, tc.msg.Queue).Val() {
+            t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
+        }
+
+        // Enqueue the second message, should fail.
         got := r.EnqueueUnique(tc.msg, tc.ttl)
         if got != ErrDuplicateTask {
             t.Errorf("Second message: (*RDB).EnqueueUnique(%v, %v) = %v, want %v",
@@ -134,9 +147,6 @@ func TestEnqueueUnique(t *testing.T) {
             t.Errorf("TTL %q = %v, want %v", tc.msg.UniqueKey, gotTTL, tc.ttl)
             continue
         }
-        if !r.client.SIsMember(base.AllQueues, tc.msg.Queue).Val() {
-            t.Errorf("%q is not a member of SET %q", tc.msg.Queue, base.AllQueues)
-        }
     }
 }

@@ -148,6 +158,7 @@ func TestDequeue(t *testing.T) {
         ID:       uuid.New(),
         Type:     "send_email",
         Payload:  map[string]interface{}{"subject": "hello!"},
+        Queue:    "default",
         Timeout:  1800,
         Deadline: 0,
     }
@@ -156,6 +167,7 @@ func TestDequeue(t *testing.T) {
         ID:       uuid.New(),
         Type:     "export_csv",
         Payload:  nil,
+        Queue:    "critical",
         Timeout:  0,
         Deadline: 1593021600,
     }
@@ -164,10 +176,10 @@ func TestDequeue(t *testing.T) {
         ID:       uuid.New(),
         Type:     "reindex",
         Payload:  nil,
+        Queue:    "low",
         Timeout:  int64((5 * time.Minute).Seconds()),
         Deadline: time.Now().Add(10 * time.Minute).Unix(),
     }
-    t3Deadline := now.Unix() + t3.Timeout // use whichever is earliest

     tests := []struct {
         pending map[string][]*base.TaskMessage
@@ -243,26 +255,26 @@ func TestDequeue(t *testing.T) {
         },
         {
             pending: map[string][]*base.TaskMessage{
-                "default":  {t3},
+                "default":  {t1},
                 "critical": {},
-                "low":      {t2, t1},
+                "low":      {t3},
             },
             args:         []string{"critical", "default", "low"},
-            wantMsg:      t3,
-            wantDeadline: time.Unix(t3Deadline, 0),
+            wantMsg:      t1,
+            wantDeadline: time.Unix(t1Deadline, 0),
             err:          nil,
             wantPending: map[string][]*base.TaskMessage{
                 "default":  {},
                 "critical": {},
-                "low":      {t2, t1},
+                "low":      {t3},
             },
             wantActive: map[string][]*base.TaskMessage{
-                "default":  {t3},
+                "default":  {t1},
                 "critical": {},
                 "low":      {},
             },
             wantDeadlines: map[string][]base.Z{
-                "default":  {{Message: t3, Score: t3Deadline}},
+                "default":  {{Message: t1, Score: t1Deadline}},
                 "critical": {},
                 "low":      {},
             },
@@ -319,7 +331,7 @@ func TestDequeue(t *testing.T) {
         for queue, want := range tc.wantPending {
             gotPending := h.GetPendingMessages(t, r.client, queue)
             if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
-                t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.QueueKey(queue), diff)
+                t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.PendingKey(queue), diff)
             }
         }
         for queue, want := range tc.wantActive {
@@ -438,7 +450,7 @@ func TestDequeueIgnoresPausedQueues(t *testing.T) {
         for queue, want := range tc.wantPending {
             gotPending := h.GetPendingMessages(t, r.client, queue)
             if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
-                t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.QueueKey(queue), diff)
+                t.Errorf("mismatch found in %q: (-want,+got):\n%s", base.PendingKey(queue), diff)
             }
         }
         for queue, want := range tc.wantActive {
@@ -485,7 +497,7 @@ func TestDone(t *testing.T) {

     tests := []struct {
         desc       string
-        inProgress map[string][]*base.TaskMessage // initial state of the active list
+        active     map[string][]*base.TaskMessage // initial state of the active list
         deadlines  map[string][]base.Z            // initial state of deadlines set
         target     *base.TaskMessage              // task to remove
         wantActive map[string][]*base.TaskMessage // final state of the active list
@@ -493,7 +505,7 @@ func TestDone(t *testing.T) {
     }{
         {
             desc: "removes message from the correct queue",
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t1},
                 "custom":  {t2},
             },
@@ -513,7 +525,7 @@ func TestDone(t *testing.T) {
         },
         {
             desc: "with one queue",
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t1},
             },
             deadlines: map[string][]base.Z{
@@ -529,7 +541,7 @@ func TestDone(t *testing.T) {
         },
         {
             desc: "with multiple messages in a queue",
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t1, t3},
                 "custom":  {t2},
             },
@@ -552,8 +564,8 @@ func TestDone(t *testing.T) {
     for _, tc := range tests {
         h.FlushDB(t, r.client) // clean up db before each test case
         h.SeedAllDeadlines(t, r.client, tc.deadlines)
-        h.SeedAllActiveQueues(t, r.client, tc.inProgress)
-        for _, msgs := range tc.inProgress {
+        h.SeedAllActiveQueues(t, r.client, tc.active)
+        for _, msgs := range tc.active {
             for _, msg := range msgs {
                 // Set uniqueness lock if unique key is present.
                 if len(msg.UniqueKey) > 0 {
@@ -634,7 +646,7 @@ func TestRequeue(t *testing.T) {

     tests := []struct {
         pending     map[string][]*base.TaskMessage // initial state of queues
-        inProgress  map[string][]*base.TaskMessage // initial state of the active list
+        active      map[string][]*base.TaskMessage // initial state of the active list
         deadlines   map[string][]base.Z            // initial state of the deadlines set
         target      *base.TaskMessage              // task to requeue
         wantPending map[string][]*base.TaskMessage // final state of queues
@@ -645,7 +657,7 @@ func TestRequeue(t *testing.T) {
             pending: map[string][]*base.TaskMessage{
                 "default": {},
             },
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t1, t2},
             },
             deadlines: map[string][]base.Z{
@@ -671,7 +683,7 @@ func TestRequeue(t *testing.T) {
             pending: map[string][]*base.TaskMessage{
                 "default": {t1},
             },
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t2},
             },
             deadlines: map[string][]base.Z{
@@ -695,7 +707,7 @@ func TestRequeue(t *testing.T) {
                 "default":  {t1},
                 "critical": {},
             },
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default":  {t2},
                 "critical": {t3},
             },
@@ -722,7 +734,7 @@ func TestRequeue(t *testing.T) {
     for _, tc := range tests {
         h.FlushDB(t, r.client) // clean up db before each test case
         h.SeedAllPendingQueues(t, r.client, tc.pending)
-        h.SeedAllActiveQueues(t, r.client, tc.inProgress)
+        h.SeedAllActiveQueues(t, r.client, tc.active)
         h.SeedAllDeadlines(t, r.client, tc.deadlines)

         err := r.Requeue(tc.target)
@@ -734,7 +746,7 @@ func TestRequeue(t *testing.T) {
         for qname, want := range tc.wantPending {
             gotPending := h.GetPendingMessages(t, r.client, qname)
             if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
-                t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff)
+                t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.PendingKey(qname), diff)
             }
         }
         for qname, want := range tc.wantActive {
@@ -755,12 +767,12 @@ func TestRequeue(t *testing.T) {
 func TestSchedule(t *testing.T) {
     r := setup(t)
     defer r.Close()
-    t1 := h.NewTaskMessage("send_email", map[string]interface{}{"subject": "hello"})
+    msg := h.NewTaskMessage("send_email", map[string]interface{}{"subject": "hello"})
     tests := []struct {
         msg       *base.TaskMessage
         processAt time.Time
     }{
-        {t1, time.Now().Add(15 * time.Minute)},
+        {msg, time.Now().Add(15 * time.Minute)},
     }

     for _, tc := range tests {
@@ -886,7 +898,7 @@ func TestRetry(t *testing.T) {
     errMsg := "SMTP server is not responding"

     tests := []struct {
-        inProgress map[string][]*base.TaskMessage
+        active     map[string][]*base.TaskMessage
         deadlines  map[string][]base.Z
         retry      map[string][]base.Z
         msg        *base.TaskMessage
@@ -897,7 +909,7 @@ func TestRetry(t *testing.T) {
         wantRetry map[string][]base.Z
     }{
         {
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t1, t2},
             },
             deadlines: map[string][]base.Z{
@@ -923,7 +935,7 @@ func TestRetry(t *testing.T) {
             },
         },
         {
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t1, t2},
                 "custom":  {t4},
             },
@@ -957,7 +969,7 @@ func TestRetry(t *testing.T) {

     for _, tc := range tests {
         h.FlushDB(t, r.client)
-        h.SeedAllActiveQueues(t, r.client, tc.inProgress)
+        h.SeedAllActiveQueues(t, r.client, tc.active)
         h.SeedAllDeadlines(t, r.client, tc.deadlines)
         h.SeedAllRetryQueues(t, r.client, tc.retry)

@@ -1056,7 +1068,7 @@ func TestArchive(t *testing.T) {

     // TODO(hibiken): add test cases for trimming
     tests := []struct {
-        inProgress map[string][]*base.TaskMessage
+        active     map[string][]*base.TaskMessage
         deadlines  map[string][]base.Z
         archived   map[string][]base.Z
         target     *base.TaskMessage // task to archive
@@ -1065,7 +1077,7 @@ func TestArchive(t *testing.T) {
         wantArchived map[string][]base.Z
     }{
         {
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t1, t2},
             },
             deadlines: map[string][]base.Z{
@@ -1094,7 +1106,7 @@ func TestArchive(t *testing.T) {
             },
         },
         {
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t1, t2, t3},
             },
             deadlines: map[string][]base.Z{
@@ -1124,7 +1136,7 @@ func TestArchive(t *testing.T) {
             },
         },
         {
-            inProgress: map[string][]*base.TaskMessage{
+            active: map[string][]*base.TaskMessage{
                 "default": {t1},
                 "custom":  {t4},
             },
@@ -1160,7 +1172,7 @@ func TestArchive(t *testing.T) {

     for _, tc := range tests {
         h.FlushDB(t, r.client) // clean up db before each test case
-        h.SeedAllActiveQueues(t, r.client, tc.inProgress)
+        h.SeedAllActiveQueues(t, r.client, tc.active)
         h.SeedAllDeadlines(t, r.client, tc.deadlines)
         h.SeedAllArchivedQueues(t, r.client, tc.archived)

@@ -1211,7 +1223,7 @@ func TestArchive(t *testing.T) {
         }
     }

-func TestCheckAndEnqueue(t *testing.T) {
+func TestForwardIfReady(t *testing.T) {
     r := setup(t)
     defer r.Close()
     t1 := h.NewTaskMessage("send_email", nil)
@@ -1328,7 +1340,7 @@ func TestCheckAndEnqueue(t *testing.T) {
     h.SeedAllScheduledQueues(t, r.client, tc.scheduled)
     h.SeedAllRetryQueues(t, r.client, tc.retry)

-    err := r.CheckAndEnqueue(tc.qnames...)
+    err := r.ForwardIfReady(tc.qnames...)
     if err != nil {
         t.Errorf("(*RDB).CheckScheduled(%v) = %v, want nil", tc.qnames, err)
         continue
@@ -1337,7 +1349,7 @@ func TestCheckAndEnqueue(t *testing.T) {
     for qname, want := range tc.wantPending {
         gotPending := h.GetPendingMessages(t, r.client, qname)
         if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" {
-            t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.QueueKey(qname), diff)
+            t.Errorf("mismatch found in %q; (-want, +got)\n%s", base.PendingKey(qname), diff)
         }
     }
     for qname, want := range tc.wantScheduled {
@@ -1462,7 +1474,7 @@ func TestWriteServerState(t *testing.T) {
         Concurrency:       10,
         Queues:            map[string]int{"default": 2, "email": 5, "low": 1},
         StrictPriority:    false,
-        Started:           time.Now(),
+        Started:           time.Now().UTC(),
         Status:            "running",
         ActiveWorkerCount: 0,
     }
@@ -1475,12 +1487,11 @@ func TestWriteServerState(t *testing.T) {
     // Check ServerInfo was written correctly.
     skey := base.ServerInfoKey(host, pid, serverID)
     data := r.client.Get(skey).Val()
-    var got base.ServerInfo
-    err = json.Unmarshal([]byte(data), &got)
+    got, err := base.DecodeServerInfo([]byte(data))
     if err != nil {
-        t.Fatalf("could not decode json: %v", err)
+        t.Fatalf("could not decode server info: %v", err)
     }
-    if diff := cmp.Diff(info, got); diff != "" {
+    if diff := cmp.Diff(info, *got); diff != "" {
         t.Errorf("persisted ServerInfo was %v, want %v; (-want,+got)\n%s",
             got, info, diff)
     }
@@ -1553,7 +1564,7 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
         Concurrency:       10,
         Queues:            map[string]int{"default": 2, "email": 5, "low": 1},
         StrictPriority:    false,
-        Started:           time.Now().Add(-10 * time.Minute),
+        Started:           time.Now().Add(-10 * time.Minute).UTC(),
         Status:            "running",
         ActiveWorkerCount: len(workers),
     }
@@ -1566,12 +1577,11 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
     // Check ServerInfo was written correctly.
     skey := base.ServerInfoKey(host, pid, serverID)
     data := r.client.Get(skey).Val()
-    var got base.ServerInfo
-    err = json.Unmarshal([]byte(data), &got)
+    got, err := base.DecodeServerInfo([]byte(data))
     if err != nil {
-        t.Fatalf("could not decode json: %v", err)
+        t.Fatalf("could not decode server info: %v", err)
     }
-    if diff := cmp.Diff(serverInfo, got); diff != "" {
+    if diff := cmp.Diff(serverInfo, *got); diff != "" {
         t.Errorf("persisted ServerInfo was %v, want %v; (-want,+got)\n%s",
             got, serverInfo, diff)
     }
@@ -1595,11 +1605,11 @@ func TestWriteServerStateWithWorkers(t *testing.T) {
     }
     var gotWorkers []*base.WorkerInfo
     for _, val := range wdata {
-        var w base.WorkerInfo
-        if err := json.Unmarshal([]byte(val), &w); err != nil {
+        w, err := base.DecodeWorkerInfo([]byte(val))
+        if err != nil {
             t.Fatalf("could not unmarshal worker's data: %v", err)
         }
-        gotWorkers = append(gotWorkers, &w)
+        gotWorkers = append(gotWorkers, w)
     }
     if diff := cmp.Diff(workers, gotWorkers, h.SortWorkerInfoOpt); diff != "" {
         t.Errorf("persisted workers info was %v, want %v; (-want,+got)\n%s",

@@ -126,13 +126,13 @@ func (tb *TestBroker) Archive(msg *base.TaskMessage, errMsg string) error {
     return tb.real.Archive(msg, errMsg)
 }

-func (tb *TestBroker) CheckAndEnqueue(qnames ...string) error {
+func (tb *TestBroker) ForwardIfReady(qnames ...string) error {
     tb.mu.Lock()
     defer tb.mu.Unlock()
     if tb.sleeping {
         return errRedisDown
     }
-    return tb.real.CheckAndEnqueue(qnames...)
+    return tb.real.ForwardIfReady(qnames...)
 }

 func (tb *TestBroker) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
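The TestBroker wrappers above all follow the same guard: take the mutex and, if the broker has been put to sleep, fail with errRedisDown instead of delegating to the real RDB. A hedged sketch of how a test might exercise that path; the Sleep and Wakeup toggles and the testbroker package path are assumptions about the test helper, not confirmed by this diff:

// Sketch only: simulate a redis outage around a ForwardIfReady call.
func exerciseRedisDown(tb *testbroker.TestBroker) error {
	tb.Sleep()                          // assumed helper that marks the broker as down
	err := tb.ForwardIfReady("default") // expected to return errRedisDown while sleeping
	tb.Wakeup()                         // assumed helper that clears the flag
	return err
}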