Mirror of https://github.com/hibiken/asynq.git, synced 2024-11-10 11:31:58 +08:00
Change internal constructor signatures.
Created "params" type to avoid positional arguments. Personally it feels more explicit and reads better.
parent: 4e3e053989
commit: 4492ed9255
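The pattern is the same in every component touched by this commit: a constructor with positional arguments becomes a constructor that takes a single params struct whose fields are named at the call site. Below is a minimal, self-contained sketch of the idea, not the actual asynq API: it uses a stripped-down heartbeater with only standard-library types (the real heartbeaterParams also carries base.Broker and *base.ServerState from the internal packages), so the field set here is illustrative only.

package main

import (
	"fmt"
	"log"
	"os"
	"time"
)

// heartbeaterParams mirrors the commit's pattern: constructor inputs become
// named fields instead of positional arguments. Simplified field set for
// illustration; the real struct lives in the asynq package.
type heartbeaterParams struct {
	logger   *log.Logger
	interval time.Duration
}

type heartbeater struct {
	logger   *log.Logger
	interval time.Duration
	done     chan struct{}
}

func newHeartbeater(params heartbeaterParams) *heartbeater {
	return &heartbeater{
		logger:   params.logger,
		interval: params.interval,
		done:     make(chan struct{}),
	}
}

func main() {
	// Every argument is named at the call site, so reordering fields or
	// adding new ones later cannot silently change a caller's meaning the
	// way positional arguments can.
	hb := newHeartbeater(heartbeaterParams{
		logger:   log.New(os.Stderr, "", log.LstdFlags),
		interval: 5 * time.Second,
	})
	fmt.Println(hb.interval)
}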
heartbeat.go (17 changed lines)
@@ -27,13 +27,20 @@ type heartbeater struct {
 	interval time.Duration
 }
 
-func newHeartbeater(l *log.Logger, b base.Broker, ss *base.ServerState, interval time.Duration) *heartbeater {
+type heartbeaterParams struct {
+	logger      *log.Logger
+	broker      base.Broker
+	serverState *base.ServerState
+	interval    time.Duration
+}
+
+func newHeartbeater(params heartbeaterParams) *heartbeater {
 	return &heartbeater{
-		logger:   l,
-		broker:   b,
-		ss:       ss,
+		logger:   params.logger,
+		broker:   params.broker,
+		ss:       params.serverState,
 		done:     make(chan struct{}),
-		interval: interval,
+		interval: params.interval,
 	}
 }
heartbeat_test.go
@@ -38,7 +38,12 @@ func TestHeartbeater(t *testing.T) {
 		h.FlushDB(t, r)
 
 		state := base.NewServerState(tc.host, tc.pid, tc.concurrency, tc.queues, false)
-		hb := newHeartbeater(testLogger, rdbClient, state, tc.interval)
+		hb := newHeartbeater(heartbeaterParams{
+			logger:      testLogger,
+			broker:      rdbClient,
+			serverState: state,
+			interval:    tc.interval,
+		})
 
 		var wg sync.WaitGroup
 		hb.start(&wg)
@@ -115,7 +120,12 @@ func TestHeartbeaterWithRedisDown(t *testing.T) {
 	r := rdb.NewRDB(setup(t))
 	testBroker := testbroker.NewTestBroker(r)
 	ss := base.NewServerState("localhost", 1234, 10, map[string]int{"default": 1}, false)
-	hb := newHeartbeater(testLogger, testBroker, ss, time.Second)
+	hb := newHeartbeater(heartbeaterParams{
+		logger:      testLogger,
+		broker:      testBroker,
+		serverState: ss,
+		interval:    time.Second,
+	})
 
 	testBroker.Sleep()
 	var wg sync.WaitGroup
processor.go
@@ -64,7 +64,7 @@ type processor struct {
 
 type retryDelayFunc func(n int, err error, task *Task) time.Duration
 
-type newProcessorParams struct {
+type processorParams struct {
 	logger *log.Logger
 	broker base.Broker
 	ss     *base.ServerState
@@ -76,7 +76,7 @@ type newProcessorParams struct {
 }
 
 // newProcessor constructs a new processor.
-func newProcessor(params newProcessorParams) *processor {
+func newProcessor(params processorParams) *processor {
 	info := params.ss.GetInfo()
 	qcfg := normalizeQueueCfg(info.Queues)
 	orderedQueues := []string(nil)
processor_test.go
@@ -65,7 +65,7 @@ func TestProcessorSuccess(t *testing.T) {
 			return nil
 		}
 		ss := base.NewServerState("localhost", 1234, 10, defaultQueueConfig, false)
-		p := newProcessor(newProcessorParams{
+		p := newProcessor(processorParams{
 			logger: testLogger,
 			broker: rdbClient,
 			ss:     ss,
@@ -170,7 +170,7 @@ func TestProcessorRetry(t *testing.T) {
 			n++
 		}
 		ss := base.NewServerState("localhost", 1234, 10, defaultQueueConfig, false)
-		p := newProcessor(newProcessorParams{
+		p := newProcessor(processorParams{
 			logger: testLogger,
 			broker: rdbClient,
 			ss:     ss,
@@ -243,7 +243,7 @@ func TestProcessorQueues(t *testing.T) {
 
 	for _, tc := range tests {
 		ss := base.NewServerState("localhost", 1234, 10, tc.queueCfg, false)
-		p := newProcessor(newProcessorParams{
+		p := newProcessor(processorParams{
 			logger: testLogger,
 			broker: nil,
 			ss:     ss,
@@ -319,7 +319,7 @@ func TestProcessorWithStrictPriority(t *testing.T) {
 	}
 	// Note: Set concurrency to 1 to make sure tasks are processed one at a time.
 	ss := base.NewServerState("localhost", 1234, 1 /* concurrency */, queueCfg, true /*strict*/)
-	p := newProcessor(newProcessorParams{
+	p := newProcessor(processorParams{
 		logger: testLogger,
 		broker: rdbClient,
 		ss:     ss,
scheduler.go (17 changed lines)
@@ -26,16 +26,23 @@ type scheduler struct {
 	qnames []string
 }
 
-func newScheduler(l *log.Logger, b base.Broker, avgInterval time.Duration, qcfg map[string]int) *scheduler {
+type schedulerParams struct {
+	logger   *log.Logger
+	broker   base.Broker
+	interval time.Duration
+	queues   map[string]int
+}
+
+func newScheduler(params schedulerParams) *scheduler {
 	var qnames []string
-	for q := range qcfg {
+	for q := range params.queues {
 		qnames = append(qnames, q)
 	}
 	return &scheduler{
-		logger:      l,
-		broker:      b,
+		logger:      params.logger,
+		broker:      params.broker,
 		done:        make(chan struct{}),
-		avgInterval: avgInterval,
+		avgInterval: params.interval,
 		qnames:      qnames,
 	}
 }
scheduler_test.go
@@ -19,7 +19,12 @@ func TestScheduler(t *testing.T) {
 	r := setup(t)
 	rdbClient := rdb.NewRDB(r)
 	const pollInterval = time.Second
-	s := newScheduler(testLogger, rdbClient, pollInterval, defaultQueueConfig)
+	s := newScheduler(schedulerParams{
+		logger:   testLogger,
+		broker:   rdbClient,
+		interval: pollInterval,
+		queues:   defaultQueueConfig,
+	})
 	t1 := h.NewTaskMessage("gen_thumbnail", nil)
 	t2 := h.NewTaskMessage("send_email", nil)
 	t3 := h.NewTaskMessage("reindex", nil)
server.go (29 changed lines)
@@ -293,11 +293,30 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
 	ss := base.NewServerState(host, pid, n, queues, cfg.StrictPriority)
 	syncCh := make(chan *syncRequest)
 	cancels := base.NewCancelations()
-	syncer := newSyncer(logger, syncCh, 5*time.Second)
-	heartbeater := newHeartbeater(logger, rdb, ss, 5*time.Second)
-	scheduler := newScheduler(logger, rdb, 5*time.Second, queues)
-	subscriber := newSubscriber(logger, rdb, cancels)
-	processor := newProcessor(newProcessorParams{
+
+	syncer := newSyncer(syncerParams{
+		logger:     logger,
+		requestsCh: syncCh,
+		interval:   5 * time.Second,
+	})
+	heartbeater := newHeartbeater(heartbeaterParams{
+		logger:      logger,
+		broker:      rdb,
+		serverState: ss,
+		interval:    5 * time.Second,
+	})
+	scheduler := newScheduler(schedulerParams{
+		logger:   logger,
+		broker:   rdb,
+		interval: 5 * time.Second,
+		queues:   queues,
+	})
+	subscriber := newSubscriber(subscriberParams{
+		logger:       logger,
+		broker:       rdb,
+		cancelations: cancels,
+	})
+	processor := newProcessor(processorParams{
 		logger: logger,
 		broker: rdb,
 		ss:     ss,
subscriber.go
@@ -27,12 +27,18 @@ type subscriber struct {
 	retryTimeout time.Duration
 }
 
-func newSubscriber(l *log.Logger, b base.Broker, cancelations *base.Cancelations) *subscriber {
+type subscriberParams struct {
+	logger       *log.Logger
+	broker       base.Broker
+	cancelations *base.Cancelations
+}
+
+func newSubscriber(params subscriberParams) *subscriber {
 	return &subscriber{
-		logger:       l,
-		broker:       b,
+		logger:       params.logger,
+		broker:       params.broker,
 		done:         make(chan struct{}),
-		cancelations: cancelations,
+		cancelations: params.cancelations,
 		retryTimeout: 5 * time.Second,
 	}
 }
subscriber_test.go
@@ -38,7 +38,11 @@ func TestSubscriber(t *testing.T) {
 		cancelations := base.NewCancelations()
 		cancelations.Add(tc.registeredID, fakeCancelFunc)
 
-		subscriber := newSubscriber(testLogger, rdbClient, cancelations)
+		subscriber := newSubscriber(subscriberParams{
+			logger:       testLogger,
+			broker:       rdbClient,
+			cancelations: cancelations,
+		})
 		var wg sync.WaitGroup
 		subscriber.start(&wg)
 		defer subscriber.terminate()
@@ -75,7 +79,11 @@ func TestSubscriberWithRedisDown(t *testing.T) {
 	testBroker := testbroker.NewTestBroker(r)
 
 	cancelations := base.NewCancelations()
-	subscriber := newSubscriber(testLogger, testBroker, cancelations)
+	subscriber := newSubscriber(subscriberParams{
+		logger:       testLogger,
+		broker:       testBroker,
+		cancelations: cancelations,
+	})
 	subscriber.retryTimeout = 1 * time.Second // set shorter retry timeout for testing purpose.
 
 	testBroker.Sleep() // simulate a situation where subscriber cannot connect to redis.
syncer.go (14 changed lines)
@@ -30,12 +30,18 @@ type syncRequest struct {
 	errMsg string // error message
 }
 
-func newSyncer(l *log.Logger, requestsCh <-chan *syncRequest, interval time.Duration) *syncer {
+type syncerParams struct {
+	logger     *log.Logger
+	requestsCh <-chan *syncRequest
+	interval   time.Duration
+}
+
+func newSyncer(params syncerParams) *syncer {
 	return &syncer{
-		logger:     l,
-		requestsCh: requestsCh,
+		logger:     params.logger,
+		requestsCh: params.requestsCh,
 		done:       make(chan struct{}),
-		interval:   interval,
+		interval:   params.interval,
 	}
 }
syncer_test.go
@@ -27,7 +27,11 @@ func TestSyncer(t *testing.T) {
 
 	const interval = time.Second
 	syncRequestCh := make(chan *syncRequest)
-	syncer := newSyncer(testLogger, syncRequestCh, interval)
+	syncer := newSyncer(syncerParams{
+		logger:     testLogger,
+		requestsCh: syncRequestCh,
+		interval:   interval,
+	})
 	var wg sync.WaitGroup
 	syncer.start(&wg)
 	defer syncer.terminate()
@@ -52,7 +56,11 @@ func TestSyncer(t *testing.T) {
 func TestSyncerRetry(t *testing.T) {
 	const interval = time.Second
 	syncRequestCh := make(chan *syncRequest)
-	syncer := newSyncer(testLogger, syncRequestCh, interval)
+	syncer := newSyncer(syncerParams{
+		logger:     testLogger,
+		requestsCh: syncRequestCh,
+		interval:   interval,
+	})
 
 	var wg sync.WaitGroup
 	syncer.start(&wg)
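One consequence of the params-struct style, not called out in the commit but inherent to Go struct literals: a call site may omit a field and still compile, with the field taking its zero value. That makes it painless to add new fields later without updating every caller, but it also means a forgotten field (a nil broker, a zero interval) surfaces at run time rather than compile time, so a constructor may want to apply defaults or validate. A tiny illustrative sketch, redeclaring the simplified heartbeaterParams from the earlier example so it stands alone:

package main

import (
	"fmt"
	"time"
)

// Simplified stand-in for the real params struct (illustrative only).
type heartbeaterParams struct {
	interval time.Duration
}

func main() {
	// Omitting interval compiles fine; the field is simply its zero value.
	// Under the old positional signature this omission would not compile.
	p := heartbeaterParams{}
	if p.interval == 0 {
		p.interval = 5 * time.Second // a constructor could apply a default like this
	}
	fmt.Println(p.interval) // prints 5s
}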