2020-11-24 22:54:00 +08:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/json"
|
2020-12-12 22:52:48 +08:00
|
|
|
"log"
|
2020-11-24 22:54:00 +08:00
|
|
|
"net/http"
|
|
|
|
"strconv"
|
2021-01-24 04:06:50 +08:00
|
|
|
"time"
|
2020-11-24 22:54:00 +08:00
|
|
|
|
|
|
|
"github.com/gorilla/mux"
|
2021-01-29 11:47:58 +08:00
|
|
|
"github.com/hibiken/asynq/inspeq"
|
2020-11-24 22:54:00 +08:00
|
|
|
)
|
|
|
|
|
2020-12-02 23:19:06 +08:00
|
|
|
// ****************************************************************************
|
|
|
|
// This file defines:
|
|
|
|
// - http.Handler(s) for task related endpoints
|
|
|
|
// ****************************************************************************
|
|
|
|
|
2021-01-24 04:06:50 +08:00
|
|
|
// ListActiveTasksResponse is the JSON payload returned by the
// list-active-tasks endpoint.
type ListActiveTasksResponse struct {
	// Tasks currently being processed in the queue.
	Tasks []*ActiveTask `json:"tasks"`
	// Stats is a snapshot of the queue's current state.
	Stats *QueueStateSnapshot `json:"stats"`
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newListActiveTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-11-24 22:54:00 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
qname := vars["qname"]
|
|
|
|
pageSize, pageNum := getPageOptions(r)
|
2021-01-24 04:06:50 +08:00
|
|
|
|
2020-11-24 22:54:00 +08:00
|
|
|
tasks, err := inspector.ListActiveTasks(
|
2021-01-29 11:47:58 +08:00
|
|
|
qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
|
2020-11-24 22:54:00 +08:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
stats, err := inspector.CurrentStats(qname)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2021-01-24 04:06:50 +08:00
|
|
|
servers, err := inspector.Servers()
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2021-01-28 08:16:38 +08:00
|
|
|
// m maps taskID to WorkerInfo.
|
2021-01-29 11:47:58 +08:00
|
|
|
m := make(map[string]*inspeq.WorkerInfo)
|
2021-01-24 04:06:50 +08:00
|
|
|
for _, srv := range servers {
|
|
|
|
for _, w := range srv.ActiveWorkers {
|
|
|
|
if w.Task.Queue == qname {
|
2021-01-28 08:16:38 +08:00
|
|
|
m[w.Task.ID] = w
|
2021-01-24 04:06:50 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
activeTasks := toActiveTasks(tasks)
|
|
|
|
for _, t := range activeTasks {
|
2021-01-28 08:16:38 +08:00
|
|
|
workerInfo, ok := m[t.ID]
|
2021-01-24 04:06:50 +08:00
|
|
|
if ok {
|
2021-01-28 08:16:38 +08:00
|
|
|
t.Started = workerInfo.Started.Format(time.RFC3339)
|
|
|
|
t.Deadline = workerInfo.Deadline.Format(time.RFC3339)
|
2021-01-24 04:06:50 +08:00
|
|
|
} else {
|
|
|
|
t.Started = "-"
|
2021-01-28 08:16:38 +08:00
|
|
|
t.Deadline = "-"
|
2021-01-24 04:06:50 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
resp := ListActiveTasksResponse{
|
|
|
|
Tasks: activeTasks,
|
|
|
|
Stats: toQueueStateSnapshot(stats),
|
2020-11-24 22:54:00 +08:00
|
|
|
}
|
2021-01-24 04:06:50 +08:00
|
|
|
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
2020-12-12 22:52:48 +08:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2020-11-24 22:54:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newCancelActiveTaskHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-05 22:47:35 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
id := mux.Vars(r)["task_id"]
|
|
|
|
if err := inspector.CancelActiveTask(id); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newCancelAllActiveTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-23 22:23:15 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
const batchSize = 100
|
|
|
|
page := 1
|
|
|
|
qname := mux.Vars(r)["qname"]
|
|
|
|
for {
|
2021-01-29 11:47:58 +08:00
|
|
|
tasks, err := inspector.ListActiveTasks(qname, inspeq.Page(page), inspeq.PageSize(batchSize))
|
2020-12-23 22:23:15 +08:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
for _, t := range tasks {
|
|
|
|
if err := inspector.CancelActiveTask(t.ID); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(tasks) < batchSize {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
page++
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// batchCancelTasksRequest is the request body for the batch-cancel
// endpoint.
type batchCancelTasksRequest struct {
	// IDs of the active tasks to cancel.
	TaskIDs []string `json:"task_ids"`
}
|
|
|
|
|
|
|
|
// batchCancelTasksResponse is the response body for the batch-cancel
// endpoint. Cancelation is best-effort per task, so the response splits
// the input IDs into successes and failures.
type batchCancelTasksResponse struct {
	// IDs for which a cancelation signal was sent successfully.
	CanceledIDs []string `json:"canceled_ids"`
	// IDs for which sending the cancelation signal failed.
	ErrorIDs []string `json:"error_ids"`
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newBatchCancelActiveTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-23 22:23:15 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
r.Body = http.MaxBytesReader(w, r.Body, maxRequestBodySize)
|
|
|
|
dec := json.NewDecoder(r.Body)
|
|
|
|
dec.DisallowUnknownFields()
|
|
|
|
|
|
|
|
var req batchCancelTasksRequest
|
|
|
|
if err := dec.Decode(&req); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
resp := batchCancelTasksResponse{
|
|
|
|
// avoid null in the json response
|
|
|
|
CanceledIDs: make([]string, 0),
|
|
|
|
ErrorIDs: make([]string, 0),
|
|
|
|
}
|
|
|
|
for _, id := range req.TaskIDs {
|
|
|
|
if err := inspector.CancelActiveTask(id); err != nil {
|
|
|
|
log.Printf("error: could not send cancelation signal to task %s", id)
|
|
|
|
resp.ErrorIDs = append(resp.ErrorIDs, id)
|
|
|
|
} else {
|
|
|
|
resp.CanceledIDs = append(resp.CanceledIDs, id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newListPendingTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-11-24 22:54:00 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
qname := vars["qname"]
|
|
|
|
pageSize, pageNum := getPageOptions(r)
|
|
|
|
tasks, err := inspector.ListPendingTasks(
|
2021-01-29 11:47:58 +08:00
|
|
|
qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
|
2020-11-24 22:54:00 +08:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
stats, err := inspector.CurrentStats(qname)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
payload := make(map[string]interface{})
|
|
|
|
if len(tasks) == 0 {
|
|
|
|
// avoid nil for the tasks field in json output.
|
2020-12-02 23:19:06 +08:00
|
|
|
payload["tasks"] = make([]*PendingTask, 0)
|
2020-11-24 22:54:00 +08:00
|
|
|
} else {
|
|
|
|
payload["tasks"] = toPendingTasks(tasks)
|
|
|
|
}
|
|
|
|
payload["stats"] = toQueueStateSnapshot(stats)
|
2020-12-12 22:52:48 +08:00
|
|
|
if err := json.NewEncoder(w).Encode(payload); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2020-11-24 22:54:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newListScheduledTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-11-24 22:54:00 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
qname := vars["qname"]
|
|
|
|
pageSize, pageNum := getPageOptions(r)
|
|
|
|
tasks, err := inspector.ListScheduledTasks(
|
2021-01-29 11:47:58 +08:00
|
|
|
qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
|
2020-11-24 22:54:00 +08:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
stats, err := inspector.CurrentStats(qname)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
payload := make(map[string]interface{})
|
|
|
|
if len(tasks) == 0 {
|
|
|
|
// avoid nil for the tasks field in json output.
|
2020-12-02 23:19:06 +08:00
|
|
|
payload["tasks"] = make([]*ScheduledTask, 0)
|
2020-11-24 22:54:00 +08:00
|
|
|
} else {
|
|
|
|
payload["tasks"] = toScheduledTasks(tasks)
|
|
|
|
}
|
|
|
|
payload["stats"] = toQueueStateSnapshot(stats)
|
2020-12-12 22:52:48 +08:00
|
|
|
if err := json.NewEncoder(w).Encode(payload); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2020-11-24 22:54:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newListRetryTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-11-24 22:54:00 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
qname := vars["qname"]
|
|
|
|
pageSize, pageNum := getPageOptions(r)
|
|
|
|
tasks, err := inspector.ListRetryTasks(
|
2021-01-29 11:47:58 +08:00
|
|
|
qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
|
2020-11-24 22:54:00 +08:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
stats, err := inspector.CurrentStats(qname)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
payload := make(map[string]interface{})
|
|
|
|
if len(tasks) == 0 {
|
|
|
|
// avoid nil for the tasks field in json output.
|
2020-12-02 23:19:06 +08:00
|
|
|
payload["tasks"] = make([]*RetryTask, 0)
|
2020-11-24 22:54:00 +08:00
|
|
|
} else {
|
|
|
|
payload["tasks"] = toRetryTasks(tasks)
|
|
|
|
}
|
|
|
|
payload["stats"] = toQueueStateSnapshot(stats)
|
2020-12-12 22:52:48 +08:00
|
|
|
if err := json.NewEncoder(w).Encode(payload); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2020-11-24 22:54:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newListArchivedTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-11-24 22:54:00 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
qname := vars["qname"]
|
|
|
|
pageSize, pageNum := getPageOptions(r)
|
2021-01-13 03:59:44 +08:00
|
|
|
tasks, err := inspector.ListArchivedTasks(
|
2021-01-29 11:47:58 +08:00
|
|
|
qname, inspeq.PageSize(pageSize), inspeq.Page(pageNum))
|
2020-11-24 22:54:00 +08:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
stats, err := inspector.CurrentStats(qname)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
payload := make(map[string]interface{})
|
|
|
|
if len(tasks) == 0 {
|
|
|
|
// avoid nil for the tasks field in json output.
|
2021-01-13 03:59:44 +08:00
|
|
|
payload["tasks"] = make([]*ArchivedTask, 0)
|
2020-11-24 22:54:00 +08:00
|
|
|
} else {
|
2021-01-13 03:59:44 +08:00
|
|
|
payload["tasks"] = toArchivedTasks(tasks)
|
2020-11-24 22:54:00 +08:00
|
|
|
}
|
|
|
|
payload["stats"] = toQueueStateSnapshot(stats)
|
2020-12-12 22:52:48 +08:00
|
|
|
if err := json.NewEncoder(w).Encode(payload); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2020-11-24 22:54:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newDeleteTaskHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-07 00:46:14 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
qname, key := vars["qname"], vars["task_key"]
|
2020-12-14 22:54:02 +08:00
|
|
|
if qname == "" || key == "" {
|
|
|
|
http.Error(w, "route parameters should not be empty", http.StatusBadRequest)
|
|
|
|
return
|
|
|
|
}
|
2020-12-07 00:46:14 +08:00
|
|
|
if err := inspector.DeleteTaskByKey(qname, key); err != nil {
|
|
|
|
// TODO: Handle task not found error and return 404
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newRunTaskHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-14 22:54:02 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
qname, key := vars["qname"], vars["task_key"]
|
|
|
|
if qname == "" || key == "" {
|
|
|
|
http.Error(w, "route parameters should not be empty", http.StatusBadRequest)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := inspector.RunTaskByKey(qname, key); err != nil {
|
|
|
|
// TODO: Handle task not found error and return 404
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newArchiveTaskHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-19 22:51:46 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
qname, key := vars["qname"], vars["task_key"]
|
|
|
|
if qname == "" || key == "" {
|
|
|
|
http.Error(w, "route parameters should not be empty", http.StatusBadRequest)
|
|
|
|
return
|
|
|
|
}
|
2021-01-13 03:59:44 +08:00
|
|
|
if err := inspector.ArchiveTaskByKey(qname, key); err != nil {
|
2020-12-19 22:51:46 +08:00
|
|
|
// TODO: Handle task not found error and return 404
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-27 13:43:51 +08:00
|
|
|
// DeleteAllTasksResponse is the JSON payload returned by the
// delete-all-tasks endpoints.
type DeleteAllTasksResponse struct {
	// Number of tasks deleted.
	Deleted int `json:"deleted"`
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newDeleteAllPendingTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2021-01-21 13:30:27 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
2021-01-27 13:43:51 +08:00
|
|
|
n, err := inspector.DeleteAllPendingTasks(qname)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
resp := DeleteAllTasksResponse{n}
|
|
|
|
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
2021-01-21 13:30:27 +08:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newDeleteAllScheduledTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-07 23:22:04 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
2021-01-27 13:43:51 +08:00
|
|
|
n, err := inspector.DeleteAllScheduledTasks(qname)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
resp := DeleteAllTasksResponse{n}
|
|
|
|
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
2020-12-07 23:22:04 +08:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newDeleteAllRetryTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-07 23:22:04 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
2021-01-27 13:43:51 +08:00
|
|
|
n, err := inspector.DeleteAllRetryTasks(qname)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
resp := DeleteAllTasksResponse{n}
|
|
|
|
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
2020-12-07 23:22:04 +08:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newDeleteAllArchivedTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-07 23:22:04 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
2021-01-27 13:43:51 +08:00
|
|
|
n, err := inspector.DeleteAllArchivedTasks(qname)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
resp := DeleteAllTasksResponse{n}
|
|
|
|
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
2020-12-07 23:22:04 +08:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newRunAllScheduledTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-19 22:07:23 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
|
|
|
if _, err := inspector.RunAllScheduledTasks(qname); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newRunAllRetryTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-19 22:07:23 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
|
|
|
if _, err := inspector.RunAllRetryTasks(qname); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newRunAllArchivedTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-16 23:35:36 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
2021-01-13 03:59:44 +08:00
|
|
|
if _, err := inspector.RunAllArchivedTasks(qname); err != nil {
|
2020-12-16 23:35:36 +08:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newArchiveAllPendingTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2021-01-21 13:30:27 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
|
|
|
if _, err := inspector.ArchiveAllPendingTasks(qname); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newArchiveAllScheduledTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-19 22:51:46 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
2021-01-13 03:59:44 +08:00
|
|
|
if _, err := inspector.ArchiveAllScheduledTasks(qname); err != nil {
|
2020-12-19 22:51:46 +08:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newArchiveAllRetryTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-19 22:51:46 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qname := mux.Vars(r)["qname"]
|
2021-01-13 03:59:44 +08:00
|
|
|
if _, err := inspector.ArchiveAllRetryTasks(qname); err != nil {
|
2020-12-19 22:51:46 +08:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-12 22:52:48 +08:00
|
|
|
// batchDeleteTasksRequest is the request body used for all batch delete
// tasks endpoints.
type batchDeleteTasksRequest struct {
	// Keys of the tasks to be deleted.
	TaskKeys []string `json:"task_keys"`
}
|
|
|
|
|
|
|
|
// Note: Redis does not have any rollback mechanism, so it's possible
// to have partial success when doing a batch operation.
// For this reason this response contains a list of succeeded keys
// and a list of failed keys.
type batchDeleteTasksResponse struct {
	// task keys that were successfully deleted.
	DeletedKeys []string `json:"deleted_keys"`

	// task keys that were not deleted.
	FailedKeys []string `json:"failed_keys"`
}
|
|
|
|
|
|
|
|
// Maximum request body size in bytes.
// Allow up to 1MB in size.
// Applied via http.MaxBytesReader in the batch endpoints so oversized
// payloads fail fast instead of being buffered in full.
const maxRequestBodySize = 1000000
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newBatchDeleteTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-12 22:52:48 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
r.Body = http.MaxBytesReader(w, r.Body, maxRequestBodySize)
|
|
|
|
dec := json.NewDecoder(r.Body)
|
|
|
|
dec.DisallowUnknownFields()
|
|
|
|
|
|
|
|
var req batchDeleteTasksRequest
|
|
|
|
if err := dec.Decode(&req); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
qname := mux.Vars(r)["qname"]
|
2020-12-13 23:51:40 +08:00
|
|
|
resp := batchDeleteTasksResponse{
|
|
|
|
// avoid null in the json response
|
|
|
|
DeletedKeys: make([]string, 0),
|
|
|
|
FailedKeys: make([]string, 0),
|
|
|
|
}
|
|
|
|
for _, key := range req.TaskKeys {
|
2020-12-12 22:52:48 +08:00
|
|
|
if err := inspector.DeleteTaskByKey(qname, key); err != nil {
|
|
|
|
log.Printf("error: could not delete task with key %q: %v", key, err)
|
2020-12-13 23:51:40 +08:00
|
|
|
resp.FailedKeys = append(resp.FailedKeys, key)
|
2020-12-12 22:52:48 +08:00
|
|
|
} else {
|
2020-12-13 23:51:40 +08:00
|
|
|
resp.DeletedKeys = append(resp.DeletedKeys, key)
|
2020-12-12 22:52:48 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-15 22:16:58 +08:00
|
|
|
// batchRunTasksRequest is the request body for the batch-run endpoint.
type batchRunTasksRequest struct {
	// Keys of the tasks to move to the pending state.
	TaskKeys []string `json:"task_keys"`
}
|
|
|
|
|
|
|
|
// batchRunTasksResponse is the response body for the batch-run
// endpoint. Each operation is independent, so the input keys are split
// into successes and failures.
type batchRunTasksResponse struct {
	// task keys that were successfully moved to the pending state.
	PendingKeys []string `json:"pending_keys"`
	// task keys that were not able to move to the pending state.
	ErrorKeys []string `json:"error_keys"`
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newBatchRunTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-15 22:16:58 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
r.Body = http.MaxBytesReader(w, r.Body, maxRequestBodySize)
|
|
|
|
dec := json.NewDecoder(r.Body)
|
|
|
|
dec.DisallowUnknownFields()
|
|
|
|
|
|
|
|
var req batchRunTasksRequest
|
|
|
|
if err := dec.Decode(&req); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
qname := mux.Vars(r)["qname"]
|
|
|
|
resp := batchRunTasksResponse{
|
|
|
|
// avoid null in the json response
|
|
|
|
PendingKeys: make([]string, 0),
|
|
|
|
ErrorKeys: make([]string, 0),
|
|
|
|
}
|
|
|
|
for _, key := range req.TaskKeys {
|
|
|
|
if err := inspector.RunTaskByKey(qname, key); err != nil {
|
|
|
|
log.Printf("error: could not run task with key %q: %v", key, err)
|
|
|
|
resp.ErrorKeys = append(resp.ErrorKeys, key)
|
|
|
|
} else {
|
|
|
|
resp.PendingKeys = append(resp.PendingKeys, key)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-13 03:59:44 +08:00
|
|
|
// batchArchiveTasksRequest is the request body for the batch-archive
// endpoint.
type batchArchiveTasksRequest struct {
	// Keys of the tasks to move to the archived state.
	TaskKeys []string `json:"task_keys"`
}
|
|
|
|
|
2021-01-13 03:59:44 +08:00
|
|
|
// batchArchiveTasksResponse is the response body for the batch-archive
// endpoint. Each operation is independent, so the input keys are split
// into successes and failures.
type batchArchiveTasksResponse struct {
	// task keys that were successfully moved to the archived state.
	ArchivedKeys []string `json:"archived_keys"`
	// task keys that were not able to move to the archived state.
	ErrorKeys []string `json:"error_keys"`
}
|
|
|
|
|
2021-01-29 11:47:58 +08:00
|
|
|
func newBatchArchiveTasksHandlerFunc(inspector *inspeq.Inspector) http.HandlerFunc {
|
2020-12-19 22:51:46 +08:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
r.Body = http.MaxBytesReader(w, r.Body, maxRequestBodySize)
|
|
|
|
dec := json.NewDecoder(r.Body)
|
|
|
|
dec.DisallowUnknownFields()
|
|
|
|
|
2021-01-13 03:59:44 +08:00
|
|
|
var req batchArchiveTasksRequest
|
2020-12-19 22:51:46 +08:00
|
|
|
if err := dec.Decode(&req); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
qname := mux.Vars(r)["qname"]
|
2021-01-13 03:59:44 +08:00
|
|
|
resp := batchArchiveTasksResponse{
|
2020-12-19 22:51:46 +08:00
|
|
|
// avoid null in the json response
|
2021-01-13 03:59:44 +08:00
|
|
|
ArchivedKeys: make([]string, 0),
|
|
|
|
ErrorKeys: make([]string, 0),
|
2020-12-19 22:51:46 +08:00
|
|
|
}
|
|
|
|
for _, key := range req.TaskKeys {
|
2021-01-13 03:59:44 +08:00
|
|
|
if err := inspector.ArchiveTaskByKey(qname, key); err != nil {
|
|
|
|
log.Printf("error: could not archive task with key %q: %v", key, err)
|
2020-12-19 22:51:46 +08:00
|
|
|
resp.ErrorKeys = append(resp.ErrorKeys, key)
|
|
|
|
} else {
|
2021-01-13 03:59:44 +08:00
|
|
|
resp.ArchivedKeys = append(resp.ArchivedKeys, key)
|
2020-12-19 22:51:46 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-24 22:54:00 +08:00
|
|
|
// getPageOptions read page size and number from the request url if set,
|
|
|
|
// otherwise it returns the default value.
|
|
|
|
func getPageOptions(r *http.Request) (pageSize, pageNum int) {
|
|
|
|
pageSize = 20 // default page size
|
|
|
|
pageNum = 1 // default page num
|
|
|
|
q := r.URL.Query()
|
|
|
|
if s := q.Get("size"); s != "" {
|
|
|
|
if n, err := strconv.Atoi(s); err == nil {
|
|
|
|
pageSize = n
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if s := q.Get("page"); s != "" {
|
|
|
|
if n, err := strconv.Atoi(s); err == nil {
|
|
|
|
pageNum = n
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return pageSize, pageNum
|
|
|
|
}
|