Mirror of https://github.com/hibiken/asynq.git

Define GroupAggregator interface

Ken Hibino 2022-04-07 06:13:49 -07:00
parent a369443955
commit 829f64fd38
3 changed files with 55 additions and 39 deletions

aggregator.go

@@ -31,8 +31,8 @@ type aggregator struct {
     maxDelay time.Duration
     maxSize int
-    // Aggregation function
-    aggregateFunc func(gname string, tasks []*Task) *Task
+    // User provided group aggregator.
+    ga GroupAggregator
     // interval used to check for aggregation
     interval time.Duration
@@ -43,13 +43,13 @@ type aggregator struct {
 }
 
 type aggregatorParams struct {
     logger *log.Logger
     broker base.Broker
     queues []string
     gracePeriod time.Duration
     maxDelay time.Duration
     maxSize int
-    aggregateFunc func(gname string, msgs []*Task) *Task
+    groupAggregator GroupAggregator
 }
 
 const (
@@ -67,22 +67,22 @@ func newAggregator(params aggregatorParams) *aggregator {
         interval = params.gracePeriod
     }
     return &aggregator{
         logger: params.logger,
         broker: params.broker,
         client: &Client{broker: params.broker},
         done: make(chan struct{}),
         queues: params.queues,
         gracePeriod: params.gracePeriod,
         maxDelay: params.maxDelay,
         maxSize: params.maxSize,
-        aggregateFunc: params.aggregateFunc,
+        ga: params.groupAggregator,
         sema: make(chan struct{}, maxConcurrentAggregationChecks),
         interval: interval,
     }
 }
 
 func (a *aggregator) shutdown() {
-    if a.aggregateFunc == nil {
+    if a.ga == nil {
         return
     }
     a.logger.Debug("Aggregator shutting down...")
@@ -91,7 +91,7 @@ func (a *aggregator) shutdown() {
 }
 
 func (a *aggregator) start(wg *sync.WaitGroup) {
-    if a.aggregateFunc == nil {
+    if a.ga == nil {
         return
     }
     wg.Add(1)
@@ -158,7 +158,7 @@ func (a *aggregator) aggregate(t time.Time) {
     for i, m := range msgs {
         tasks[i] = NewTask(m.Type, m.Payload)
     }
-    aggregatedTask := a.aggregateFunc(gname, tasks)
+    aggregatedTask := a.ga.Aggregate(gname, tasks)
     ctx, cancel := context.WithDeadline(context.Background(), deadline)
     if _, err := a.client.EnqueueContext(ctx, aggregatedTask); err != nil {
         a.logger.Errorf("Failed to enqueue aggregated task (queue=%q, group=%q, setID=%q): %v",

aggregator_test.go

@@ -120,13 +120,13 @@ func TestAggregator(t *testing.T) {
     h.FlushDB(t, r)
 
     aggregator := newAggregator(aggregatorParams{
         logger: testLogger,
         broker: rdbClient,
         queues: []string{"default"},
         gracePeriod: tc.gracePeriod,
         maxDelay: tc.maxDelay,
         maxSize: tc.maxSize,
-        aggregateFunc: tc.aggregateFunc,
+        groupAggregator: GroupAggregatorFunc(tc.aggregateFunc),
     })
     var wg sync.WaitGroup

server.go

@@ -216,10 +216,26 @@ type Config struct {
     // If unset or zero, no size limit is used.
     GroupMaxSize int
 
-    // GroupAggregateFunc specifies the aggregation function used to aggregate multiple tasks in a group into one task.
+    // GroupAggregator specifies the aggregation function used to aggregate multiple tasks in a group into one task.
     //
     // If unset or nil, the group aggregation feature will be disabled on the server.
-    GroupAggregateFunc func(groupKey string, tasks []*Task) *Task
+    GroupAggregator GroupAggregator
 }
+
+// GroupAggregator aggregates a group of tasks into one before the tasks are passed to the Handler.
+type GroupAggregator interface {
+    // Aggregate aggregates the given tasks which belong to a same group
+    // and returns a new task which is the aggregation of those tasks.
+    Aggregate(groupKey string, tasks []*Task) *Task
+}
+
+// The GroupAggregatorFunc type is an adapter to allow the use of ordinary functions as a GroupAggregator.
+// If f is a function with the appropriate signature, GroupAggregatorFunc(f) is a GroupAggregator that calls f.
+type GroupAggregatorFunc func(groupKey string, tasks []*Task) *Task
+
+// Aggregate calls fn(groupKey, tasks)
+func (fn GroupAggregatorFunc) Aggregate(groupKey string, tasks []*Task) *Task {
+    return fn(groupKey, tasks)
+}
 
 // An ErrorHandler handles an error occured during task processing.
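To make the new adapter concrete, here is a minimal usage sketch that is not part of this commit: a plain function is converted into a GroupAggregator via GroupAggregatorFunc. The function name, task type name, and payload format are hypothetical.

package main

import (
    "bytes"

    "github.com/hibiken/asynq"
)

// aggregateEmails is a hypothetical aggregation function: it merges the
// payloads of all tasks in the group into one newline-separated payload
// and returns a single task representing the whole batch.
func aggregateEmails(groupKey string, tasks []*asynq.Task) *asynq.Task {
    var b bytes.Buffer
    for _, t := range tasks {
        b.Write(t.Payload())
        b.WriteByte('\n')
    }
    return asynq.NewTask("email:deliver_batch", b.Bytes())
}

func main() {
    // The GroupAggregatorFunc adapter lets the ordinary function above satisfy
    // the GroupAggregator interface without declaring a new type.
    var ga asynq.GroupAggregator = asynq.GroupAggregatorFunc(aggregateEmails)
    _ = ga // in real code this would be assigned to Config.GroupAggregator
}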
@@ -506,13 +522,13 @@ func NewServer(r RedisConnOpt, cfg Config) *Server {
         interval: 8 * time.Second,
     })
     aggregator := newAggregator(aggregatorParams{
         logger: logger,
         broker: rdb,
         queues: qnames,
         gracePeriod: groupGracePeriod,
         maxDelay: cfg.GroupMaxDelay,
         maxSize: cfg.GroupMaxSize,
-        aggregateFunc: cfg.GroupAggregateFunc,
+        groupAggregator: cfg.GroupAggregator,
     })
     return &Server{
         logger: logger,
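A rough sketch of how the new Config field could be wired into a server follows. It assumes the grouping fields shown in this diff plus Config.GroupGracePeriod from the same grouping feature; the Redis address, durations, and task type name are illustrative only.

package main

import (
    "log"
    "time"

    "github.com/hibiken/asynq"
)

func main() {
    srv := asynq.NewServer(
        asynq.RedisClientOpt{Addr: "localhost:6379"},
        asynq.Config{
            // Grouping knobs; the values here are illustrative, not recommendations.
            GroupGracePeriod: 2 * time.Minute, // assumed field from the same grouping feature
            GroupMaxDelay:    10 * time.Minute,
            GroupMaxSize:     20,
            // Supplying a GroupAggregator enables aggregation on the server;
            // here an inline function is adapted via GroupAggregatorFunc.
            GroupAggregator: asynq.GroupAggregatorFunc(func(group string, tasks []*asynq.Task) *asynq.Task {
                var payload []byte
                for _, t := range tasks {
                    payload = append(payload, t.Payload()...)
                    payload = append(payload, '\n')
                }
                return asynq.NewTask("batch:process", payload)
            }),
        },
    )

    mux := asynq.NewServeMux()
    // A handler for the aggregated task type would be registered here, e.g.:
    // mux.HandleFunc("batch:process", handleBatch)

    if err := srv.Run(mux); err != nil {
        log.Fatal(err)
    }
}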