mirror of https://github.com/hibiken/asynqmon.git
synced 2025-01-18 10:53:31 +08:00

commit d0b72f135c
parent 4b54ec1548

unexport types
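
In short: this commit renames the public constructor asynqmon.NewHTTPHandler to asynqmon.New and unexports the package's JSON view-model types (QueueStateSnapshot, DailyStats, TaskInfo, BaseTask, SchedulerEntry, ServerInfo, WorkerInfo, and the per-state task and response types), which are only used internally to shape handler output. Below is a minimal usage sketch of the renamed constructor, pieced together from the example hunks in this diff; the "ui-assets" directory argument and the final ListenAndServe call are assumptions added to make the sketch self-contained, not part of the diff itself:

package main

import (
	"embed"
	"log"
	"net/http"

	"github.com/hibiken/asynq"
	"github.com/hibiken/asynqmon"
)

//go:embed ui-assets/*
var staticContents embed.FS

func main() {
	// The constructor is now asynqmon.New (previously asynqmon.NewHTTPHandler).
	h := asynqmon.New(asynqmon.Options{
		RedisConnOpt: asynq.RedisClientOpt{Addr: ":6379"},
		StaticContentHandler: asynqmon.NewStaticContentHandler(
			staticContents,
			"ui-assets", // assumed static dir, matching the //go:embed pattern above
			"index.html",
		),
	})
	defer h.Close()

	// *HTTPHandler implements http.Handler, so it can be mounted directly.
	srv := &http.Server{
		Handler: h,
		Addr:    ":8080",
	}
	log.Fatal(srv.ListenAndServe())
}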
@@ -72,7 +72,7 @@ import (
 var staticContents embed.FS
 
 func main() {
-	api := asynqmon.NewHTTPHandler(asynqmon.Options{
+	h := asynqmon.New(asynqmon.Options{
 		RedisConnOpt: asynq.RedisClientOpt{Addr: ":6379"},
 		StaticContentHandler: asynqmon.NewStaticContentHandler(
 			staticContents,
@@ -80,10 +80,10 @@ func main() {
 			"index.html",
 		),
 	})
-	defer api.Close()
+	defer h.Close()
 
 	srv := &http.Server{
-		Handler: api,
+		Handler: h,
 		Addr:    ":8080",
 	}
 
@@ -105,7 +105,7 @@ func main() {
 		}
 	}
 
-	api := asynqmon.NewHTTPHandler(asynqmon.Options{
+	h := asynqmon.New(asynqmon.Options{
 		RedisConnOpt: redisConnOpt,
 		Middlewares:  []asynqmon.MiddlewareFunc{loggingMiddleware},
 		StaticContentHandler: asynqmon.NewStaticContentHandler(
@@ -114,14 +114,14 @@ func main() {
 			"index.html",
 		),
 	})
-	defer api.Close()
+	defer h.Close()
 
 	c := cors.New(cors.Options{
 		AllowedMethods: []string{"GET", "POST", "DELETE"},
 	})
 
 	srv := &http.Server{
-		Handler:      c.Handler(api),
+		Handler:      c.Handler(h),
 		Addr:         fmt.Sprintf(":%d", flagPort),
 		WriteTimeout: 10 * time.Second,
 		ReadTimeout:  10 * time.Second,
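
The second example above passes a loggingMiddleware into Options.Middlewares. Its body is outside this diff; the sketch below shows one hypothetical shape for it, assuming asynqmon.MiddlewareFunc follows the conventional func(http.Handler) http.Handler form:

package main

import (
	"log"
	"net/http"
	"time"
)

// loggingMiddleware wraps the dashboard handler and logs each request.
// The name mirrors the example above; the implementation here is illustrative.
func loggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		// Log method, path, and how long the wrapped handler took.
		log.Printf("%s %s took %v", r.Method, r.URL.Path, time.Since(start))
	})
}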
@@ -19,8 +19,10 @@ type PayloadFormatter interface {
 	FormatPayload(taskType string, payload []byte) string
 }
 
+// PayloadFormatterFunc can be used to create a PayloadFormatter.
 type PayloadFormatterFunc func(string, []byte) string
 
+// FormatPayload returns the string representation of the payload of a type.
 func (f PayloadFormatterFunc) FormatPayload(taskType string, payload []byte) string {
 	return f(taskType, payload)
 }
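
The doc comments added above describe PayloadFormatterFunc as an adapter, in the spirit of http.HandlerFunc: any plain function with the matching signature satisfies PayloadFormatter. A small sketch of that adapter in use; only the interface and the adapter come from this hunk, and the JSON pretty-printing logic and sample task type are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/hibiken/asynqmon"
)

// prettyPayload has the PayloadFormatterFunc signature: it receives the task
// type and the raw payload bytes and returns the string to display.
func prettyPayload(taskType string, payload []byte) string {
	var decoded map[string]interface{}
	if err := json.Unmarshal(payload, &decoded); err != nil {
		// Not JSON; fall back to a quoted string.
		return fmt.Sprintf("%q", payload)
	}
	pretty, _ := json.MarshalIndent(decoded, "", "  ")
	return string(pretty)
}

func main() {
	// The adapter turns the plain function into a PayloadFormatter.
	var pf asynqmon.PayloadFormatter = asynqmon.PayloadFormatterFunc(prettyPayload)
	fmt.Println(pf.FormatPayload("email:welcome", []byte(`{"user_id":42}`)))
}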
@@ -49,7 +51,7 @@ func isPrintable(data []byte) bool {
 	return !isAllSpace
 }
 
-type QueueStateSnapshot struct {
+type queueStateSnapshot struct {
 	// Name of the queue.
 	Queue string `json:"queue"`
 	// Total number of bytes the queue and its tasks require to be stored in redis.
@@ -76,8 +78,8 @@ type QueueStateSnapshot struct {
 	Timestamp time.Time `json:"timestamp"`
 }
 
-func toQueueStateSnapshot(s *asynq.QueueInfo) *QueueStateSnapshot {
-	return &QueueStateSnapshot{
+func toQueueStateSnapshot(s *asynq.QueueInfo) *queueStateSnapshot {
+	return &queueStateSnapshot{
 		Queue:       s.Queue,
 		MemoryUsage: s.MemoryUsage,
 		Size:        s.Size,
@@ -94,7 +96,7 @@ func toQueueStateSnapshot(s *asynq.QueueInfo) *QueueStateSnapshot {
 	}
 }
 
-type DailyStats struct {
+type dailyStats struct {
 	Queue     string `json:"queue"`
 	Processed int    `json:"processed"`
 	Succeeded int    `json:"succeeded"`
@@ -102,8 +104,8 @@ type DailyStats struct {
 	Date      string `json:"date"`
 }
 
-func toDailyStats(s *asynq.DailyStats) *DailyStats {
-	return &DailyStats{
+func toDailyStats(s *asynq.DailyStats) *dailyStats {
+	return &dailyStats{
 		Queue:     s.Queue,
 		Processed: s.Processed,
 		Succeeded: s.Processed - s.Failed,
@@ -112,15 +114,15 @@ func toDailyStats(s *asynq.DailyStats) *DailyStats {
 	}
 }
 
-func toDailyStatsList(in []*asynq.DailyStats) []*DailyStats {
-	out := make([]*DailyStats, len(in))
+func toDailyStatsList(in []*asynq.DailyStats) []*dailyStats {
+	out := make([]*dailyStats, len(in))
 	for i, s := range in {
 		out[i] = toDailyStats(s)
 	}
 	return out
 }
 
-type TaskInfo struct {
+type taskInfo struct {
 	// ID is the identifier of the task.
 	ID string `json:"id"`
 	// Queue is the name of the queue in which the task belongs.
@@ -158,8 +160,8 @@ func formatTimeInRFC3339(t time.Time) string {
 	return t.Format(time.RFC3339)
 }
 
-func toTaskInfo(info *asynq.TaskInfo, pf PayloadFormatter) *TaskInfo {
-	return &TaskInfo{
+func toTaskInfo(info *asynq.TaskInfo, pf PayloadFormatter) *taskInfo {
+	return &taskInfo{
 		ID:    info.ID,
 		Queue: info.Queue,
 		Type:  info.Type,
@@ -175,7 +177,7 @@ func toTaskInfo(info *asynq.TaskInfo, pf PayloadFormatter) *TaskInfo {
 	}
 }
 
-type BaseTask struct {
+type baseTask struct {
 	ID      string `json:"id"`
 	Type    string `json:"type"`
 	Payload string `json:"payload"`
@@ -185,8 +187,8 @@ type BaseTask struct {
 	LastError string `json:"error_message"`
 }
 
-type ActiveTask struct {
-	*BaseTask
+type activeTask struct {
+	*baseTask
 
 	// Started time indicates when a worker started working on ths task.
 	//
@@ -202,8 +204,8 @@ type ActiveTask struct {
 	Deadline string `json:"deadline"`
 }
 
-func toActiveTask(ti *asynq.TaskInfo, pf PayloadFormatter) *ActiveTask {
-	base := &BaseTask{
+func toActiveTask(ti *asynq.TaskInfo, pf PayloadFormatter) *activeTask {
+	base := &baseTask{
 		ID:      ti.ID,
 		Type:    ti.Type,
 		Payload: pf.FormatPayload(ti.Type, ti.Payload),
@@ -212,24 +214,24 @@ func toActiveTask(ti *asynq.TaskInfo, pf PayloadFormatter) *ActiveTask {
 		Retried:   ti.Retried,
 		LastError: ti.LastErr,
 	}
-	return &ActiveTask{BaseTask: base}
+	return &activeTask{baseTask: base}
 }
 
-func toActiveTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*ActiveTask {
-	out := make([]*ActiveTask, len(in))
+func toActiveTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*activeTask {
+	out := make([]*activeTask, len(in))
 	for i, ti := range in {
 		out[i] = toActiveTask(ti, pf)
 	}
 	return out
 }
 
-// TODO: Maybe we don't need state specific type, just use TaskInfo
-type PendingTask struct {
-	*BaseTask
+// TODO: Maybe we don't need state specific type, just use taskInfo
+type pendingTask struct {
+	*baseTask
 }
 
-func toPendingTask(ti *asynq.TaskInfo, pf PayloadFormatter) *PendingTask {
-	base := &BaseTask{
+func toPendingTask(ti *asynq.TaskInfo, pf PayloadFormatter) *pendingTask {
+	base := &baseTask{
 		ID:      ti.ID,
 		Type:    ti.Type,
 		Payload: pf.FormatPayload(ti.Type, ti.Payload),
@@ -238,26 +240,26 @@ func toPendingTask(ti *asynq.TaskInfo, pf PayloadFormatter) *PendingTask {
 		Retried:   ti.Retried,
 		LastError: ti.LastErr,
 	}
-	return &PendingTask{
-		BaseTask: base,
+	return &pendingTask{
+		baseTask: base,
 	}
 }
 
-func toPendingTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*PendingTask {
-	out := make([]*PendingTask, len(in))
+func toPendingTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*pendingTask {
+	out := make([]*pendingTask, len(in))
 	for i, ti := range in {
 		out[i] = toPendingTask(ti, pf)
 	}
 	return out
 }
 
-type ScheduledTask struct {
-	*BaseTask
+type scheduledTask struct {
+	*baseTask
 	NextProcessAt time.Time `json:"next_process_at"`
 }
 
-func toScheduledTask(ti *asynq.TaskInfo, pf PayloadFormatter) *ScheduledTask {
-	base := &BaseTask{
+func toScheduledTask(ti *asynq.TaskInfo, pf PayloadFormatter) *scheduledTask {
+	base := &baseTask{
 		ID:      ti.ID,
 		Type:    ti.Type,
 		Payload: pf.FormatPayload(ti.Type, ti.Payload),
@@ -266,27 +268,27 @@ func toScheduledTask(ti *asynq.TaskInfo, pf PayloadFormatter) *ScheduledTask {
 		Retried:   ti.Retried,
 		LastError: ti.LastErr,
 	}
-	return &ScheduledTask{
-		BaseTask:      base,
+	return &scheduledTask{
+		baseTask:      base,
 		NextProcessAt: ti.NextProcessAt,
 	}
 }
 
-func toScheduledTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*ScheduledTask {
-	out := make([]*ScheduledTask, len(in))
+func toScheduledTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*scheduledTask {
+	out := make([]*scheduledTask, len(in))
 	for i, ti := range in {
 		out[i] = toScheduledTask(ti, pf)
 	}
 	return out
 }
 
-type RetryTask struct {
-	*BaseTask
+type retryTask struct {
+	*baseTask
 	NextProcessAt time.Time `json:"next_process_at"`
 }
 
-func toRetryTask(ti *asynq.TaskInfo, pf PayloadFormatter) *RetryTask {
-	base := &BaseTask{
+func toRetryTask(ti *asynq.TaskInfo, pf PayloadFormatter) *retryTask {
+	base := &baseTask{
 		ID:      ti.ID,
 		Type:    ti.Type,
 		Payload: pf.FormatPayload(ti.Type, ti.Payload),
@@ -295,27 +297,27 @@ func toRetryTask(ti *asynq.TaskInfo, pf PayloadFormatter) *RetryTask {
 		Retried:   ti.Retried,
 		LastError: ti.LastErr,
 	}
-	return &RetryTask{
-		BaseTask:      base,
+	return &retryTask{
+		baseTask:      base,
 		NextProcessAt: ti.NextProcessAt,
 	}
 }
 
-func toRetryTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*RetryTask {
-	out := make([]*RetryTask, len(in))
+func toRetryTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*retryTask {
+	out := make([]*retryTask, len(in))
 	for i, ti := range in {
 		out[i] = toRetryTask(ti, pf)
 	}
 	return out
 }
 
-type ArchivedTask struct {
-	*BaseTask
+type archivedTask struct {
+	*baseTask
 	LastFailedAt time.Time `json:"last_failed_at"`
 }
 
-func toArchivedTask(ti *asynq.TaskInfo, pf PayloadFormatter) *ArchivedTask {
-	base := &BaseTask{
+func toArchivedTask(ti *asynq.TaskInfo, pf PayloadFormatter) *archivedTask {
+	base := &baseTask{
 		ID:      ti.ID,
 		Type:    ti.Type,
 		Payload: pf.FormatPayload(ti.Type, ti.Payload),
@@ -324,21 +326,21 @@ func toArchivedTask(ti *asynq.TaskInfo, pf PayloadFormatter) *ArchivedTask {
 		Retried:   ti.Retried,
 		LastError: ti.LastErr,
 	}
-	return &ArchivedTask{
-		BaseTask:     base,
+	return &archivedTask{
+		baseTask:     base,
 		LastFailedAt: ti.LastFailedAt,
 	}
 }
 
-func toArchivedTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*ArchivedTask {
-	out := make([]*ArchivedTask, len(in))
+func toArchivedTasks(in []*asynq.TaskInfo, pf PayloadFormatter) []*archivedTask {
+	out := make([]*archivedTask, len(in))
 	for i, ti := range in {
 		out[i] = toArchivedTask(ti, pf)
 	}
 	return out
 }
 
-type SchedulerEntry struct {
+type schedulerEntry struct {
 	ID       string `json:"id"`
 	Spec     string `json:"spec"`
 	TaskType string `json:"task_type"`
@@ -349,7 +351,7 @@ type SchedulerEntry struct {
 	PrevEnqueueAt string `json:"prev_enqueue_at,omitempty"`
 }
 
-func toSchedulerEntry(e *asynq.SchedulerEntry, pf PayloadFormatter) *SchedulerEntry {
+func toSchedulerEntry(e *asynq.SchedulerEntry, pf PayloadFormatter) *schedulerEntry {
 	opts := make([]string, 0) // create a non-nil, empty slice to avoid null in json output
 	for _, o := range e.Opts {
 		opts = append(opts, o.String())
@@ -358,7 +360,7 @@ func toSchedulerEntry(e *asynq.SchedulerEntry, pf PayloadFormatter) *SchedulerEn
 	if !e.Prev.IsZero() {
 		prev = e.Prev.Format(time.RFC3339)
 	}
-	return &SchedulerEntry{
+	return &schedulerEntry{
 		ID:       e.ID,
 		Spec:     e.Spec,
 		TaskType: e.Task.Type(),
@@ -369,35 +371,35 @@ func toSchedulerEntry(e *asynq.SchedulerEntry, pf PayloadFormatter) *SchedulerEn
 	}
 }
 
-func toSchedulerEntries(in []*asynq.SchedulerEntry, pf PayloadFormatter) []*SchedulerEntry {
-	out := make([]*SchedulerEntry, len(in))
+func toSchedulerEntries(in []*asynq.SchedulerEntry, pf PayloadFormatter) []*schedulerEntry {
+	out := make([]*schedulerEntry, len(in))
 	for i, e := range in {
 		out[i] = toSchedulerEntry(e, pf)
 	}
 	return out
 }
 
-type SchedulerEnqueueEvent struct {
+type schedulerEnqueueEvent struct {
 	TaskID     string `json:"task_id"`
 	EnqueuedAt string `json:"enqueued_at"`
 }
 
-func toSchedulerEnqueueEvent(e *asynq.SchedulerEnqueueEvent) *SchedulerEnqueueEvent {
-	return &SchedulerEnqueueEvent{
+func toSchedulerEnqueueEvent(e *asynq.SchedulerEnqueueEvent) *schedulerEnqueueEvent {
+	return &schedulerEnqueueEvent{
 		TaskID:     e.TaskID,
 		EnqueuedAt: e.EnqueuedAt.Format(time.RFC3339),
 	}
 }
 
-func toSchedulerEnqueueEvents(in []*asynq.SchedulerEnqueueEvent) []*SchedulerEnqueueEvent {
-	out := make([]*SchedulerEnqueueEvent, len(in))
+func toSchedulerEnqueueEvents(in []*asynq.SchedulerEnqueueEvent) []*schedulerEnqueueEvent {
+	out := make([]*schedulerEnqueueEvent, len(in))
 	for i, e := range in {
 		out[i] = toSchedulerEnqueueEvent(e)
 	}
 	return out
 }
 
-type ServerInfo struct {
+type serverInfo struct {
 	ID   string `json:"id"`
 	Host string `json:"host"`
 	PID  int    `json:"pid"`
@@ -406,11 +408,11 @@ type ServerInfo struct {
 	StrictPriority bool   `json:"strict_priority_enabled"`
 	Started        string `json:"start_time"`
 	Status         string `json:"status"`
-	ActiveWorkers  []*WorkerInfo `json:"active_workers"`
+	ActiveWorkers  []*workerInfo `json:"active_workers"`
 }
 
-func toServerInfo(info *asynq.ServerInfo, pf PayloadFormatter) *ServerInfo {
-	return &ServerInfo{
+func toServerInfo(info *asynq.ServerInfo, pf PayloadFormatter) *serverInfo {
+	return &serverInfo{
 		ID:   info.ID,
 		Host: info.Host,
 		PID:  info.PID,
@@ -423,15 +425,15 @@ func toServerInfo(info *asynq.ServerInfo, pf PayloadFormatter) *ServerInfo {
 	}
 }
 
-func toServerInfoList(in []*asynq.ServerInfo, pf PayloadFormatter) []*ServerInfo {
-	out := make([]*ServerInfo, len(in))
+func toServerInfoList(in []*asynq.ServerInfo, pf PayloadFormatter) []*serverInfo {
+	out := make([]*serverInfo, len(in))
 	for i, s := range in {
 		out[i] = toServerInfo(s, pf)
 	}
 	return out
 }
 
-type WorkerInfo struct {
+type workerInfo struct {
 	TaskID   string `json:"task_id"`
 	Queue    string `json:"queue"`
 	TaskType string `json:"task_type"`
@@ -439,8 +441,8 @@ type WorkerInfo struct {
 	Started  string `json:"start_time"`
 }
 
-func toWorkerInfo(info *asynq.WorkerInfo, pf PayloadFormatter) *WorkerInfo {
-	return &WorkerInfo{
+func toWorkerInfo(info *asynq.WorkerInfo, pf PayloadFormatter) *workerInfo {
+	return &workerInfo{
 		TaskID:   info.TaskID,
 		Queue:    info.Queue,
 		TaskType: info.TaskType,
@@ -449,8 +451,8 @@ func toWorkerInfo(info *asynq.WorkerInfo, pf PayloadFormatter) *WorkerInfo {
 	}
 }
 
-func toWorkerInfoList(in []*asynq.WorkerInfo, pf PayloadFormatter) []*WorkerInfo {
-	out := make([]*WorkerInfo, len(in))
+func toWorkerInfoList(in []*asynq.WorkerInfo, pf PayloadFormatter) []*workerInfo {
+	out := make([]*workerInfo, len(in))
 	for i, w := range in {
 		out[i] = toWorkerInfo(w, pf)
 	}
@@ -12,8 +12,8 @@ import (
 //go:embed ui-assets/*
 var staticContents embed.FS
 
-func ExampleNewHTTPHandler() {
-	api := asynqmon.NewHTTPHandler(asynqmon.Options{
+func ExampleNew() {
+	h := asynqmon.New(asynqmon.Options{
 		RedisConnOpt: asynq.RedisClientOpt{Addr: ":6379"},
 		StaticContentHandler: asynqmon.NewStaticContentHandler(
 			staticContents,
@@ -21,10 +21,10 @@ func ExampleNewHTTPHandler() {
 			"index.html",
 		),
 	})
-	defer api.Close()
+	defer h.Close()
 
 	srv := &http.Server{
-		Handler: api,
+		Handler: h,
 		Addr:    ":8080",
 	}
 
@@ -29,7 +29,7 @@ func (a *HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	a.router.ServeHTTP(w, r)
 }
 
-func NewHTTPHandler(opts Options) *HTTPHandler {
+func New(opts Options) *HTTPHandler {
 	rc, ok := opts.RedisConnOpt.MakeRedisClient().(redis.UniversalClient)
 	if !ok {
 		panic(fmt.Sprintf("asnyqmon.HTTPHandler: unsupported RedisConnOpt type %T", opts.RedisConnOpt))
@@ -22,7 +22,7 @@ func newListQueuesHandlerFunc(inspector *asynq.Inspector) http.HandlerFunc {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		snapshots := make([]*QueueStateSnapshot, len(qnames))
+		snapshots := make([]*queueStateSnapshot, len(qnames))
 		for i, qname := range qnames {
 			qinfo, err := inspector.GetQueueInfo(qname)
 			if err != nil {
@@ -56,7 +56,7 @@ func newGetQueueHandlerFunc(inspector *asynq.Inspector) http.HandlerFunc {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		var dailyStats []*DailyStats
+		var dailyStats []*dailyStats
 		for _, s := range data {
 			dailyStats = append(dailyStats, toDailyStats(s))
 		}
@@ -109,8 +109,8 @@ func newResumeQueueHandlerFunc(inspector *asynq.Inspector) http.HandlerFunc {
 	}
 }
 
-type ListQueueStatsResponse struct {
-	Stats map[string][]*DailyStats `json:"stats"`
+type listQueueStatsResponse struct {
+	Stats map[string][]*dailyStats `json:"stats"`
 }
 
 func newListQueueStatsHandlerFunc(inspector *asynq.Inspector) http.HandlerFunc {
@@ -120,7 +120,7 @@ func newListQueueStatsHandlerFunc(inspector *asynq.Inspector) http.HandlerFunc {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		resp := ListQueueStatsResponse{Stats: make(map[string][]*DailyStats)}
+		resp := listQueueStatsResponse{Stats: make(map[string][]*dailyStats)}
 		const numdays = 90 // Get stats for the last 90 days.
 		for _, qname := range qnames {
 			stats, err := inspector.History(qname, numdays)
@@ -16,7 +16,7 @@ import (
 // - http.Handler(s) for redis info related endpoints
 // ****************************************************************************
 
-type RedisInfoResponse struct {
+type redisInfoResponse struct {
 	Addr    string            `json:"address"`
 	Info    map[string]string `json:"info"`
 	RawInfo string            `json:"raw_info"`
@@ -24,10 +24,10 @@ type RedisInfoResponse struct {
 
 	// Following fields are only set when connected to redis cluster.
 	RawClusterNodes string               `json:"raw_cluster_nodes"`
-	QueueLocations  []*QueueLocationInfo `json:"queue_locations"`
+	QueueLocations  []*queueLocationInfo `json:"queue_locations"`
 }
 
-type QueueLocationInfo struct {
+type queueLocationInfo struct {
 	Queue   string   `json:"queue"`   // queue name
 	KeySlot int64    `json:"keyslot"` // cluster key slot for the queue
 	Nodes   []string `json:"nodes"`   // list of cluster node addresses
@@ -41,7 +41,7 @@ func newRedisInfoHandlerFunc(client *redis.Client) http.HandlerFunc {
 			return
 		}
 		info := parseRedisInfo(res)
-		resp := RedisInfoResponse{
+		resp := redisInfoResponse{
 			Addr:    client.Options().Addr,
 			Info:    info,
 			RawInfo: res,
@@ -73,9 +73,9 @@ func newRedisClusterInfoHandlerFunc(client *redis.ClusterClient, inspector *asyn
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		var queueLocations []*QueueLocationInfo
+		var queueLocations []*queueLocationInfo
 		for _, qname := range queues {
-			q := QueueLocationInfo{Queue: qname}
+			q := queueLocationInfo{Queue: qname}
 			q.KeySlot, err = inspector.ClusterKeySlot(qname)
 			if err != nil {
 				http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -92,7 +92,7 @@ func newRedisClusterInfoHandlerFunc(client *redis.ClusterClient, inspector *asyn
 			queueLocations = append(queueLocations, &q)
 		}
 
-		resp := RedisInfoResponse{
+		resp := redisInfoResponse{
 			Addr:    strings.Join(client.Options().Addrs, ","),
 			Info:    info,
 			RawInfo: rawClusterInfo,
@@ -24,7 +24,7 @@ func newListSchedulerEntriesHandlerFunc(inspector *asynq.Inspector, pf PayloadFo
 		payload := make(map[string]interface{})
 		if len(entries) == 0 {
 			// avoid nil for the entries field in json output.
-			payload["entries"] = make([]*SchedulerEntry, 0)
+			payload["entries"] = make([]*schedulerEntry, 0)
 		} else {
 			payload["entries"] = toSchedulerEntries(entries, pf)
 		}
@@ -35,8 +35,8 @@ func newListSchedulerEntriesHandlerFunc(inspector *asynq.Inspector, pf PayloadFo
 	}
 }
 
-type ListSchedulerEnqueueEventsResponse struct {
-	Events []*SchedulerEnqueueEvent `json:"events"`
+type listSchedulerEnqueueEventsResponse struct {
+	Events []*schedulerEnqueueEvent `json:"events"`
 }
 
 func newListSchedulerEnqueueEventsHandlerFunc(inspector *asynq.Inspector) http.HandlerFunc {
@@ -49,7 +49,7 @@ func newListSchedulerEnqueueEventsHandlerFunc(inspector *asynq.Inspector) http.H
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		resp := ListSchedulerEnqueueEventsResponse{
+		resp := listSchedulerEnqueueEventsResponse{
 			Events: toSchedulerEnqueueEvents(events),
 		}
 		if err := json.NewEncoder(w).Encode(resp); err != nil {
@@ -12,8 +12,8 @@ import (
 // - http.Handler(s) for server related endpoints
 // ****************************************************************************
 
-type ListServersResponse struct {
-	Servers []*ServerInfo `json:"servers"`
+type listServersResponse struct {
+	Servers []*serverInfo `json:"servers"`
 }
 
 func newListServersHandlerFunc(inspector *asynq.Inspector, pf PayloadFormatter) http.HandlerFunc {
@@ -23,7 +23,7 @@ func newListServersHandlerFunc(inspector *asynq.Inspector, pf PayloadFormatter)
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		resp := ListServersResponse{
+		resp := listServersResponse{
 			Servers: toServerInfoList(srvs, pf),
 		}
 		if err := json.NewEncoder(w).Encode(resp); err != nil {
@@ -19,9 +19,9 @@ import (
 // - http.Handler(s) for task related endpoints
 // ****************************************************************************
 
-type ListActiveTasksResponse struct {
-	Tasks []*ActiveTask       `json:"tasks"`
-	Stats *QueueStateSnapshot `json:"stats"`
+type listActiveTasksResponse struct {
+	Tasks []*activeTask       `json:"tasks"`
+	Stats *queueStateSnapshot `json:"stats"`
 }
 
 func newListActiveTasksHandlerFunc(inspector *asynq.Inspector, pf PayloadFormatter) http.HandlerFunc {
@@ -46,7 +46,7 @@ func newListActiveTasksHandlerFunc(inspector *asynq.Inspector, pf PayloadFormatt
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		// m maps taskID to WorkerInfo.
+		// m maps taskID to workerInfo.
 		m := make(map[string]*asynq.WorkerInfo)
 		for _, srv := range servers {
 			for _, w := range srv.ActiveWorkers {
@@ -67,7 +67,7 @@ func newListActiveTasksHandlerFunc(inspector *asynq.Inspector, pf PayloadFormatt
 			}
 		}
 
-		resp := ListActiveTasksResponse{
+		resp := listActiveTasksResponse{
 			Tasks: activeTasks,
 			Stats: toQueueStateSnapshot(qinfo),
 		}
@@ -175,7 +175,7 @@ func newListPendingTasksHandlerFunc(inspector *asynq.Inspector, pf PayloadFormat
 		payload := make(map[string]interface{})
 		if len(tasks) == 0 {
 			// avoid nil for the tasks field in json output.
-			payload["tasks"] = make([]*PendingTask, 0)
+			payload["tasks"] = make([]*pendingTask, 0)
 		} else {
 			payload["tasks"] = toPendingTasks(tasks, pf)
 		}
@@ -206,7 +206,7 @@ func newListScheduledTasksHandlerFunc(inspector *asynq.Inspector, pf PayloadForm
 		payload := make(map[string]interface{})
 		if len(tasks) == 0 {
 			// avoid nil for the tasks field in json output.
-			payload["tasks"] = make([]*ScheduledTask, 0)
+			payload["tasks"] = make([]*scheduledTask, 0)
 		} else {
 			payload["tasks"] = toScheduledTasks(tasks, pf)
 		}
@@ -237,7 +237,7 @@ func newListRetryTasksHandlerFunc(inspector *asynq.Inspector, pf PayloadFormatte
 		payload := make(map[string]interface{})
 		if len(tasks) == 0 {
 			// avoid nil for the tasks field in json output.
-			payload["tasks"] = make([]*RetryTask, 0)
+			payload["tasks"] = make([]*retryTask, 0)
 		} else {
 			payload["tasks"] = toRetryTasks(tasks, pf)
 		}
@@ -268,7 +268,7 @@ func newListArchivedTasksHandlerFunc(inspector *asynq.Inspector, pf PayloadForma
 		payload := make(map[string]interface{})
 		if len(tasks) == 0 {
 			// avoid nil for the tasks field in json output.
-			payload["tasks"] = make([]*ArchivedTask, 0)
+			payload["tasks"] = make([]*archivedTask, 0)
 		} else {
 			payload["tasks"] = toArchivedTasks(tasks, pf)
 		}
@@ -331,7 +331,7 @@ func newArchiveTaskHandlerFunc(inspector *asynq.Inspector) http.HandlerFunc {
 	}
 }
 
-type DeleteAllTasksResponse struct {
+type deleteAllTasksResponse struct {
 	// Number of tasks deleted.
 	Deleted int `json:"deleted"`
 }
@@ -344,7 +344,7 @@ func newDeleteAllPendingTasksHandlerFunc(inspector *asynq.Inspector) http.Handle
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		resp := DeleteAllTasksResponse{n}
+		resp := deleteAllTasksResponse{n}
 		if err := json.NewEncoder(w).Encode(resp); err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
@@ -360,7 +360,7 @@ func newDeleteAllScheduledTasksHandlerFunc(inspector *asynq.Inspector) http.Hand
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		resp := DeleteAllTasksResponse{n}
+		resp := deleteAllTasksResponse{n}
 		if err := json.NewEncoder(w).Encode(resp); err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
@@ -376,7 +376,7 @@ func newDeleteAllRetryTasksHandlerFunc(inspector *asynq.Inspector) http.HandlerF
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		resp := DeleteAllTasksResponse{n}
+		resp := deleteAllTasksResponse{n}
 		if err := json.NewEncoder(w).Encode(resp); err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
@@ -392,7 +392,7 @@ func newDeleteAllArchivedTasksHandlerFunc(inspector *asynq.Inspector) http.Handl
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
-		resp := DeleteAllTasksResponse{n}
+		resp := deleteAllTasksResponse{n}
 		if err := json.NewEncoder(w).Encode(resp); err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return