diff --git a/artifacts/deploy/webhook-configuration.yaml b/artifacts/deploy/webhook-configuration.yaml index ece489305591..b0459fb5bf40 100644 --- a/artifacts/deploy/webhook-configuration.yaml +++ b/artifacts/deploy/webhook-configuration.yaml @@ -209,3 +209,17 @@ webhooks: sideEffects: None admissionReviewVersions: [ "v1" ] timeoutSeconds: 10 + - name: cronfederatedhpa.karmada.io + rules: + - operations: ["CREATE", "UPDATE"] + apiGroups: ["autoscaling.karmada.io"] + apiVersions: ["*"] + resources: ["cronfederatedhpas"] + scope: "Namespaced" + clientConfig: + url: https://karmada-webhook.karmada-system.svc:443/validate-cronfederatedhpa + caBundle: {{caBundle}} + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: [ "v1" ] + timeoutSeconds: 10 diff --git a/cluster/images/Dockerfile b/cluster/images/Dockerfile index 7413777c4e58..6d198aceeded 100644 --- a/cluster/images/Dockerfile +++ b/cluster/images/Dockerfile @@ -3,5 +3,7 @@ FROM alpine:3.18.2 ARG BINARY RUN apk add --no-cache ca-certificates +#tzdata is used to parse the time zone information when using CronFederatedHPA +RUN apk add --no-cache tzdata COPY ${BINARY} /bin/${BINARY} diff --git a/cluster/images/buildx.Dockerfile b/cluster/images/buildx.Dockerfile index 48c2a54e8e03..a1e1223a5261 100644 --- a/cluster/images/buildx.Dockerfile +++ b/cluster/images/buildx.Dockerfile @@ -4,5 +4,7 @@ ARG BINARY ARG TARGETPLATFORM RUN apk add --no-cache ca-certificates +#tzdata is used to parse the time zone information when using CronFederatedHPA +RUN apk add --no-cache tzdata COPY ${TARGETPLATFORM}/${BINARY} /bin/${BINARY} diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index b71e1319f3eb..bf0361d92027 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -37,6 +37,7 @@ import ( "github.com/karmada-io/karmada/pkg/controllers/binding" 
"github.com/karmada-io/karmada/pkg/controllers/cluster" controllerscontext "github.com/karmada-io/karmada/pkg/controllers/context" + "github.com/karmada-io/karmada/pkg/controllers/cronfederatedhpa" "github.com/karmada-io/karmada/pkg/controllers/execution" "github.com/karmada-io/karmada/pkg/controllers/federatedhpa" metricsclient "github.com/karmada-io/karmada/pkg/controllers/federatedhpa/metrics" @@ -204,6 +205,7 @@ func init() { controllers["gracefulEviction"] = startGracefulEvictionController controllers["applicationFailover"] = startApplicationFailoverController controllers["federatedHorizontalPodAutoscaler"] = startFederatedHorizontalPodAutoscalerController + controllers["cronFederatedHorizontalPodAutoscaler"] = startCronFederatedHorizontalPodAutoscalerController } func startClusterController(ctx controllerscontext.Context) (enabled bool, err error) { @@ -591,6 +593,18 @@ func startFederatedHorizontalPodAutoscalerController(ctx controllerscontext.Cont return true, nil } +func startCronFederatedHorizontalPodAutoscalerController(ctx controllerscontext.Context) (enabled bool, err error) { + cronFHPAController := cronfederatedhpa.CronFHPAController{ + Client: ctx.Mgr.GetClient(), + EventRecorder: ctx.Mgr.GetEventRecorderFor(cronfederatedhpa.ControllerName), + RateLimiterOptions: ctx.Opts.RateLimiterOptions, + } + if err = cronFHPAController.SetupWithManager(ctx.Mgr); err != nil { + return false, err + } + return true, nil +} + // setupControllers initialize controllers and setup one by one. 
func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) { restConfig := mgr.GetConfig() diff --git a/cmd/webhook/app/webhook.go b/cmd/webhook/app/webhook.go index a816d9ef0ef7..4f4041c62705 100644 --- a/cmd/webhook/app/webhook.go +++ b/cmd/webhook/app/webhook.go @@ -25,6 +25,7 @@ import ( "github.com/karmada-io/karmada/pkg/webhook/clusteroverridepolicy" "github.com/karmada-io/karmada/pkg/webhook/clusterpropagationpolicy" "github.com/karmada-io/karmada/pkg/webhook/configuration" + "github.com/karmada-io/karmada/pkg/webhook/cronfederatedhpa" "github.com/karmada-io/karmada/pkg/webhook/federatedhpa" "github.com/karmada-io/karmada/pkg/webhook/federatedresourcequota" "github.com/karmada-io/karmada/pkg/webhook/multiclusteringress" @@ -129,6 +130,7 @@ func Run(ctx context.Context, opts *options.Options) error { hookServer.Register("/validate-resourceinterpreterwebhookconfiguration", &webhook.Admission{Handler: &configuration.ValidatingAdmission{}}) hookServer.Register("/validate-federatedresourcequota", &webhook.Admission{Handler: &federatedresourcequota.ValidatingAdmission{}}) hookServer.Register("/validate-federatedhpa", &webhook.Admission{Handler: &federatedhpa.ValidatingAdmission{}}) + hookServer.Register("/validate-cronfederatedhpa", &webhook.Admission{Handler: &cronfederatedhpa.ValidatingAdmission{}}) hookServer.Register("/validate-resourceinterpretercustomization", &webhook.Admission{Handler: &resourceinterpretercustomization.ValidatingAdmission{Client: hookManager.GetClient()}}) hookServer.Register("/validate-multiclusteringress", &webhook.Admission{Handler: &multiclusteringress.ValidatingAdmission{}}) hookServer.Register("/mutate-federatedhpa", &webhook.Admission{Handler: &federatedhpa.MutatingAdmission{}}) diff --git a/go.mod b/go.mod index a4a8f81557b8..92317208ac35 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,11 @@ module github.com/karmada-io/karmada go 1.20 require ( + github.com/adhocore/gronx v1.6.3 
github.com/distribution/distribution/v3 v3.0.0-20210507173845-9329f6a62b67 github.com/emirpasic/gods v1.18.1 github.com/evanphx/json-patch/v5 v5.6.0 + github.com/go-co-op/gocron v1.30.1 github.com/gogo/protobuf v1.3.2 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 @@ -133,7 +135,8 @@ require ( github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/rivo/uniseg v0.4.2 // indirect - github.com/rogpeppe/go-internal v1.6.1 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/rs/zerolog v1.26.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/afero v1.9.3 // indirect diff --git a/go.sum b/go.sum index f991a0625e73..56f349b627bb 100644 --- a/go.sum +++ b/go.sum @@ -81,6 +81,8 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/adhocore/gronx v1.6.3 h1:bnm5vieTrY3QQPpsfB0hrAaeaHDpuZTUC2LLCVMLe9c= +github.com/adhocore/gronx v1.6.3/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -246,6 +248,8 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-co-op/gocron v1.30.1 h1:tjWUvJl5KrcwpkEkSXFSQFr4F9h5SfV/m4+RX0cV2fs= +github.com/go-co-op/gocron v1.30.1/go.mod h1:39f6KNSGVOU1LO/ZOoZfcSxwlsJDQOKSu8erN0SH48Y= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -649,6 +653,7 @@ github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvI github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -702,11 +707,14 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc= github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc= diff --git a/pkg/apis/autoscaling/v1alpha1/well_known_constants.go b/pkg/apis/autoscaling/v1alpha1/well_known_constants.go index 6cd6d03451e8..698bfbd4b30d 100644 --- a/pkg/apis/autoscaling/v1alpha1/well_known_constants.go +++ b/pkg/apis/autoscaling/v1alpha1/well_known_constants.go @@ -1,6 +1,9 @@ package v1alpha1 const ( + // FederatedHPAKind is the kind of FederatedHPA in group autoscaling.karmada.io + FederatedHPAKind = "FederatedHPA" + // QuerySourceAnnotationKey is the annotation used in karmada-metrics-adapter to // record the query source cluster QuerySourceAnnotationKey = "resource.karmada.io/query-from-cluster" diff --git a/pkg/controllers/cronfederatedhpa/cronfederatedhpa_controller.go b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_controller.go new file mode 100755 index 000000000000..3b159cf7f3ba --- /dev/null +++ b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_controller.go @@ -0,0 +1,204 @@ +/* +Copyright 2023 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cronfederatedhpa + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" + "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag" + "github.com/karmada-io/karmada/pkg/util/helper" +) + +const ( + // ControllerName is the controller name that will be used when reporting events. + ControllerName = "cronfederatedhpa-controller" +) + +// CronFHPAController is used to operate CronFederatedHPA. +type CronFHPAController struct { + client.Client // used to operate Cron resources. + EventRecorder record.EventRecorder + + RateLimiterOptions ratelimiterflag.Options + CronHandler *CronHandler +} + +// Reconcile performs a full reconciliation for the object referred to by the Request. +// The Controller will requeue the Request to be processed again if an error is non-nil or +// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
+func (c *CronFHPAController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) { + klog.V(4).Infof("Reconciling CronFederatedHPA %s", req.NamespacedName) + + cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{} + if err := c.Client.Get(ctx, req.NamespacedName, cronFHPA); err != nil { + if apierrors.IsNotFound(err) { + klog.V(4).Infof("Begin to cleanup the cron jobs for CronFederatedHPA:%s", req.NamespacedName) + c.CronHandler.StopCronFHPAExecutor(req.NamespacedName.String()) + return controllerruntime.Result{}, nil + } + + klog.Errorf("Fail to get CronFederatedHPA(%s):%v", req.NamespacedName, err) + return controllerruntime.Result{Requeue: true}, err + } + + // If this CronFederatedHPA is deleting, stop all related cron executors + if !cronFHPA.DeletionTimestamp.IsZero() { + c.CronHandler.StopCronFHPAExecutor(req.NamespacedName.String()) + return controllerruntime.Result{}, nil + } + + origRuleSets := sets.New[string]() + for _, history := range cronFHPA.Status.ExecutionHistories { + origRuleSets.Insert(history.RuleName) + } + + // If scale target is updated, stop all the rule executors, and next steps will create the new executors + if c.CronHandler.CronFHPAScaleTargetRefUpdates(req.NamespacedName.String(), cronFHPA.Spec.ScaleTargetRef) { + c.CronHandler.StopCronFHPAExecutor(req.NamespacedName.String()) + } + + c.CronHandler.AddCronExecutorIfNotExist(req.NamespacedName.String()) + + newRuleSets := sets.New[string]() + for _, rule := range cronFHPA.Spec.Rules { + if err := c.processCronRule(cronFHPA, rule); err != nil { + return controllerruntime.Result{Requeue: true}, err + } + newRuleSets.Insert(rule.Name) + } + + // If rule is deleted, remove the rule executor from the handler + for name := range origRuleSets { + if newRuleSets.Has(name) { + continue + } + c.CronHandler.StopRuleExecutor(req.NamespacedName.String(), name) + if err := c.removeCronFHPAHistory(cronFHPA, name); err != nil { + return 
controllerruntime.Result{Requeue: true}, err + } + } + + return controllerruntime.Result{}, nil +} + +// SetupWithManager creates a controller and register to controller manager. +func (c *CronFHPAController) SetupWithManager(mgr controllerruntime.Manager) error { + c.CronHandler = NewCronHandler(mgr.GetClient(), mgr.GetEventRecorderFor(ControllerName)) + return controllerruntime.NewControllerManagedBy(mgr). + For(&autoscalingv1alpha1.CronFederatedHPA{}). + WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). + Complete(c) +} + +// processCronRule processes the cron rule +func (c *CronFHPAController) processCronRule(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, rule autoscalingv1alpha1.CronFederatedHPARule) error { + cronFHPAKey := helper.GetCronFederatedHPAKey(cronFHPA) + if ruleOld, exists := c.CronHandler.RuleCronExecutorExists(cronFHPAKey, rule.Name); exists { + if equality.Semantic.DeepEqual(ruleOld, rule) { + return nil + } + c.CronHandler.StopRuleExecutor(cronFHPAKey, rule.Name) + } + + if !helper.IsCronFederatedHPARuleSuspend(rule) { + if err := c.CronHandler.CreateCronJobForExecutor(cronFHPA, rule); err != nil { + c.EventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "StartRuleFailed", err.Error()) + klog.Errorf("Fail to start cron for CronFederatedHPA(%s) rule(%s):%v", cronFHPAKey, rule.Name, err) + return err + } + } + + if err := c.updateRuleHistory(cronFHPA, rule); err != nil { + c.EventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "UpdateCronFederatedHPAFailed", err.Error()) + return err + } + return nil +} + +// updateRuleHistory updates the rule history +func (c *CronFHPAController) updateRuleHistory(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, rule autoscalingv1alpha1.CronFederatedHPARule) error { + var nextExecutionTime *metav1.Time + if !helper.IsCronFederatedHPARuleSuspend(rule) { + // If rule is not suspended, we should set the nextExecutionTime filed, or the 
nextExecutionTime will be nil + next, err := c.CronHandler.GetRuleNextExecuteTime(cronFHPA, rule.Name) + if err != nil { + klog.Errorf("Fail to get next execution time for CronFederatedHPA(%s/%s) rule(%s):%v", + cronFHPA.Namespace, cronFHPA.Name, rule.Name, err) + return err + } + nextExecutionTime = &metav1.Time{Time: next} + } + + exists := false + for index, history := range cronFHPA.Status.ExecutionHistories { + if history.RuleName != rule.Name { + continue + } + exists = true + cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = nextExecutionTime + break + } + + if !exists { + ruleHistory := autoscalingv1alpha1.ExecutionHistory{ + RuleName: rule.Name, + NextExecutionTime: nextExecutionTime, + } + cronFHPA.Status.ExecutionHistories = append(cronFHPA.Status.ExecutionHistories, ruleHistory) + } + + if err := c.Client.Status().Update(context.Background(), cronFHPA); err != nil { + klog.Errorf("Fail to update CronFederatedHPA(%s/%s) rule(%s)'s next execution time:%v", + cronFHPA.Namespace, cronFHPA.Name, rule.Name, err) + return err + } + + return nil + } + + // removeCronFHPAHistory removes the rule history in status + func (c *CronFHPAController) removeCronFHPAHistory(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, ruleName string) error { + exists := false + for index, history := range cronFHPA.Status.ExecutionHistories { + if history.RuleName != ruleName { + continue + } + cronFHPA.Status.ExecutionHistories = append(cronFHPA.Status.ExecutionHistories[:index], cronFHPA.Status.ExecutionHistories[index+1:]...) 
+ exists = true + break + } + + if !exists { + return nil + } + if err := c.Client.Status().Update(context.Background(), cronFHPA); err != nil { + c.EventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "UpdateCronFederatedHPAFailed", err.Error()) + klog.Errorf("Fail to remove CronFederatedHPA(%s/%s) rule(%s) history:%v", cronFHPA.Namespace, cronFHPA.Name, ruleName, err) + return err + } + + return nil +} diff --git a/pkg/controllers/cronfederatedhpa/cronfederatedhpa_handler.go b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_handler.go new file mode 100755 index 000000000000..76a318037aef --- /dev/null +++ b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_handler.go @@ -0,0 +1,177 @@ +/* +Copyright 2023 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cronfederatedhpa + +import ( + "fmt" + "sync" + "time" + _ "time/tzdata" + + "github.com/go-co-op/gocron" + autoscalingv2 "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" + "github.com/karmada-io/karmada/pkg/util/helper" +) + +type RuleCron struct { + *gocron.Scheduler + autoscalingv1alpha1.CronFederatedHPARule +} + +type CronHandler struct { + client client.Client + eventRecorder record.EventRecorder + + // cronExecutorMap is [cronFederatedHPA name][rule name]RuleCron + cronExecutorMap map[string]map[string]RuleCron + executorLock sync.RWMutex + + // cronFHPAScaleTargetMap is [cronFHPA name]CrossVersionObjectReference + cronFHPAScaleTargetMap map[string]autoscalingv2.CrossVersionObjectReference + scaleTargetLock sync.RWMutex +} + +// NewCronHandler creates new cron handler +func NewCronHandler(client client.Client, eventRecorder record.EventRecorder) *CronHandler { + return &CronHandler{ + client: client, + eventRecorder: eventRecorder, + cronExecutorMap: make(map[string]map[string]RuleCron), + cronFHPAScaleTargetMap: make(map[string]autoscalingv2.CrossVersionObjectReference), + } +} + +// CronFHPAScaleTargetRefUpdates checks if the scale target changed +func (c *CronHandler) CronFHPAScaleTargetRefUpdates(cronFHPAKey string, scaleTarget autoscalingv2.CrossVersionObjectReference) bool { + c.scaleTargetLock.Lock() + defer c.scaleTargetLock.Unlock() + + origTarget, ok := c.cronFHPAScaleTargetMap[cronFHPAKey] + if !ok { + c.cronFHPAScaleTargetMap[cronFHPAKey] = scaleTarget + return false + } + + return !equality.Semantic.DeepEqual(origTarget, scaleTarget) +} + +// AddCronExecutorIfNotExist creates the executor for CronFederatedHPA if not exist +func (c *CronHandler) AddCronExecutorIfNotExist(cronFHPAKey string) { + c.executorLock.Lock() + defer 
c.executorLock.Unlock() + + if _, ok := c.cronExecutorMap[cronFHPAKey]; ok { + return + } + + c.cronExecutorMap[cronFHPAKey] = make(map[string]RuleCron) +} + +func (c *CronHandler) RuleCronExecutorExists(cronFHPAKey string, + ruleName string) (autoscalingv1alpha1.CronFederatedHPARule, bool) { + c.executorLock.RLock() + defer c.executorLock.RUnlock() + + if _, ok := c.cronExecutorMap[cronFHPAKey]; !ok { + return autoscalingv1alpha1.CronFederatedHPARule{}, false + } + cronRule, exists := c.cronExecutorMap[cronFHPAKey][ruleName] + return cronRule.CronFederatedHPARule, exists +} + +// StopRuleExecutor stops the executor for specific CronFederatedHPA rule +func (c *CronHandler) StopRuleExecutor(cronFHPAKey string, ruleName string) { + c.executorLock.Lock() + defer c.executorLock.Unlock() + + if _, ok := c.cronExecutorMap[cronFHPAKey]; !ok { + return + } + if _, ok := c.cronExecutorMap[cronFHPAKey][ruleName]; !ok { + return + } + c.cronExecutorMap[cronFHPAKey][ruleName].Stop() + delete(c.cronExecutorMap[cronFHPAKey], ruleName) +} + +// StopCronFHPAExecutor stops the executor for specific CronFederatedHPA +func (c *CronHandler) StopCronFHPAExecutor(cronFHPAKey string) { + c.executorLock.Lock() + defer c.executorLock.Unlock() + + if _, ok := c.cronExecutorMap[cronFHPAKey]; !ok { + return + } + for _, scheduler := range c.cronExecutorMap[cronFHPAKey] { + scheduler.Stop() + } + + delete(c.cronExecutorMap, cronFHPAKey) +} + +// CreateCronJobForExecutor creates the executor for a rule of CronFederatedHPA +func (c *CronHandler) CreateCronJobForExecutor(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, + rule autoscalingv1alpha1.CronFederatedHPARule) error { + var err error + timeZone := time.Now().Location() + + if rule.TimeZone != nil { + timeZone, err = time.LoadLocation(*rule.TimeZone) + if err != nil { + // This should not happen because there is validation in webhook + klog.Errorf("Invalid CronFederatedHPA(%s/%s) rule(%s) time zone(%s):%v", + cronFHPA.Namespace, 
cronFHPA.Name, rule.Name, *rule.TimeZone, err) + return err + } + } + + scheduler := gocron.NewScheduler(timeZone) + cronJob := NewCronFederatedHPAJob(c.client, c.eventRecorder, scheduler, cronFHPA, rule) + if _, err := scheduler.Cron(rule.Schedule).Do(RunCronFederatedHPARule, cronJob); err != nil { + klog.Errorf("Create cron job for CronFederatedHPA(%s/%s) rule(%s) error:%v", + cronFHPA.Namespace, cronFHPA.Name, rule.Name, err) + return err + } + scheduler.StartAsync() + + cronFHPAKey := helper.GetCronFederatedHPAKey(cronFHPA) + c.executorLock.Lock() + defer c.executorLock.Unlock() + ruleExecutorMap := c.cronExecutorMap[cronFHPAKey] + ruleExecutorMap[rule.Name] = RuleCron{Scheduler: scheduler, CronFederatedHPARule: rule} + return nil + } + + func (c *CronHandler) GetRuleNextExecuteTime(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, ruleName string) (time.Time, error) { + c.executorLock.RLock() + defer c.executorLock.RUnlock() + + if _, ok := c.cronExecutorMap[helper.GetCronFederatedHPAKey(cronFHPA)]; !ok { + return time.Time{}, fmt.Errorf("CronFederatedHPA(%s/%s) not start", cronFHPA.Namespace, cronFHPA.Name) + } + + ruleCron, exists := c.cronExecutorMap[helper.GetCronFederatedHPAKey(cronFHPA)][ruleName] + if !exists { + return time.Time{}, fmt.Errorf("CronFederatedHPA(%s/%s) rule(%s) not exist", cronFHPA.Namespace, cronFHPA.Name, ruleName) + } + + _, next := ruleCron.Scheduler.NextRun() + return next, nil + } diff --git a/pkg/controllers/cronfederatedhpa/cronfederatedhpa_job.go b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_job.go new file mode 100755 index 000000000000..3220f8233b12 --- /dev/null +++ b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_job.go @@ -0,0 +1,322 @@ +/* +Copyright 2023 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cronfederatedhpa + +import ( + "context" + "fmt" + "time" + + "github.com/go-co-op/gocron" + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/helper" +) + +type CronFederatedHPAJob struct { + client client.Client + eventRecorder record.EventRecorder + scheduler *gocron.Scheduler + + namespaceName types.NamespacedName + rule autoscalingv1alpha1.CronFederatedHPARule +} + +func NewCronFederatedHPAJob(client client.Client, eventRecorder record.EventRecorder, scheduler *gocron.Scheduler, + cronFHPA *autoscalingv1alpha1.CronFederatedHPA, rule autoscalingv1alpha1.CronFederatedHPARule) *CronFederatedHPAJob { + return &CronFederatedHPAJob{ + client: client, + eventRecorder: eventRecorder, + scheduler: scheduler, + namespaceName: types.NamespacedName{ + Name: cronFHPA.Name, + Namespace: cronFHPA.Namespace, + }, + rule: rule, + } +} + +func RunCronFederatedHPARule(c *CronFederatedHPAJob) { + klog.V(4).Infof("Start to handle CronFederatedHPA %s", c.namespaceName) + defer klog.V(4).Infof("End to handle CronFederatedHPA %s", 
c.namespaceName) + + var err error + cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{} + err = c.client.Get(context.TODO(), c.namespaceName, cronFHPA) + if err != nil { + if apierrors.IsNotFound(err) { + klog.Infof("CronFederatedHPA(%s) not found", c.namespaceName) + } else { + // TODO: This may happen when the network is down, we should do something here + // But we are not sure what to do (retrying does not solve the problem) + klog.Errorf("Get CronFederatedHPA(%s) failed: %v", c.namespaceName, err) + } + return + } + + if helper.IsCronFederatedHPARuleSuspend(c.rule) { + // If the rule is suspended, this job will be stopped soon + klog.V(4).Infof("CronFederatedHPA(%s) Rule(%s) is suspended, skip it", c.namespaceName, c.rule.Name) + return + } + + var scaleErr error + defer func() { + if scaleErr != nil { + c.eventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "ScaleFailed", scaleErr.Error()) + err = c.addFailedExecutionHistory(cronFHPA, scaleErr.Error()) + } else { + err = c.addSuccessExecutionHistory(cronFHPA, c.rule.TargetReplicas, c.rule.TargetMinReplicas, c.rule.TargetMaxReplicas) + } + if err != nil { + c.eventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "UpdateStatusFailed", err.Error()) + } + }() + + if cronFHPA.Spec.ScaleTargetRef.APIVersion == autoscalingv1alpha1.GroupVersion.String() { + if cronFHPA.Spec.ScaleTargetRef.Kind != autoscalingv1alpha1.FederatedHPAKind { + scaleErr = fmt.Errorf("CronFederatedHPA(%s) do not support scale target %s/%s", + c.namespaceName, cronFHPA.Spec.ScaleTargetRef.APIVersion, cronFHPA.Spec.ScaleTargetRef.Kind) + return + } + + scaleErr = retry.RetryOnConflict(retry.DefaultRetry, func() (err error) { + err = c.ScaleFHPA(cronFHPA) + return err + }) + return + } + + // scale workload directly + scaleErr = retry.RetryOnConflict(retry.DefaultRetry, func() (err error) { + err = c.ScaleWorkloads(cronFHPA) + return err + }) + } + + func (c *CronFederatedHPAJob) ScaleFHPA(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) error { + 
fhpaName := types.NamespacedName{ + Namespace: cronFHPA.Namespace, + Name: cronFHPA.Spec.ScaleTargetRef.Name, + } + + fhpa := &autoscalingv1alpha1.FederatedHPA{} + err := c.client.Get(context.TODO(), fhpaName, fhpa) + if err != nil { + return err + } + + update := false + if c.rule.TargetMaxReplicas != nil && fhpa.Spec.MaxReplicas != *c.rule.TargetMaxReplicas { + fhpa.Spec.MaxReplicas = *c.rule.TargetMaxReplicas + update = true + } + if c.rule.TargetMinReplicas != nil && *fhpa.Spec.MinReplicas != *c.rule.TargetMinReplicas { + *fhpa.Spec.MinReplicas = *c.rule.TargetMinReplicas + update = true + } + + if update { + err := c.client.Update(context.TODO(), fhpa) + if err != nil { + klog.Errorf("CronFederatedHPA(%s) updates FederatedHPA(%s/%s) failed: %v", + c.namespaceName, fhpa.Namespace, fhpa.Name, err) + return err + } + klog.V(4).Infof("CronFederatedHPA(%s) scales FederatedHPA(%s/%s) successfully", + c.namespaceName, fhpa.Namespace, fhpa.Name) + return nil + } + + klog.V(4).Infof("CronFederatedHPA(%s) find nothing updated for FederatedHPA(%s/%s), skip it", + c.namespaceName, fhpa.Namespace, fhpa.Name) + return nil +} + +func (c *CronFederatedHPAJob) ScaleWorkloads(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) error { + ctx := context.Background() + + scaleClient := c.client.SubResource("scale") + + targetGV, err := schema.ParseGroupVersion(cronFHPA.Spec.ScaleTargetRef.APIVersion) + if err != nil { + klog.Errorf("CronFederatedHPA(%s) parses GroupVersion(%s) failed: %v", + c.namespaceName, cronFHPA.Spec.ScaleTargetRef.APIVersion, err) + return err + } + targetGVK := schema.GroupVersionKind{ + Group: targetGV.Group, + Kind: cronFHPA.Spec.ScaleTargetRef.Kind, + Version: targetGV.Version, + } + targetResource := &unstructured.Unstructured{} + targetResource.SetGroupVersionKind(targetGVK) + err = c.client.Get(ctx, types.NamespacedName{Namespace: cronFHPA.Namespace, Name: cronFHPA.Spec.ScaleTargetRef.Name}, targetResource) + if err != nil { + klog.Errorf("Get 
Resource(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name, err) + return err + } + + scaleObj := &unstructured.Unstructured{} + err = scaleClient.Get(ctx, targetResource, scaleObj) + if err != nil { + klog.Errorf("Get Scale for resource(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name, err) + return err + } + + scale := &autoscalingv1.Scale{} + err = helper.ConvertToTypedObject(scaleObj, scale) + if err != nil { + klog.Errorf("Convert Scale failed: %v", err) + return err + } + + if scale.Spec.Replicas != *c.rule.TargetReplicas { + if err := helper.ApplyReplica(scaleObj, int64(*c.rule.TargetReplicas), util.ReplicasField); err != nil { + klog.Errorf("CronFederatedHPA(%s) applies Replicas for %s/%s failed: %v", + c.namespaceName, cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name, err) + return err + } + err := scaleClient.Update(ctx, targetResource, client.WithSubResourceBody(scaleObj)) + if err != nil { + klog.Errorf("CronFederatedHPA(%s) updates scale resource failed: %v", c.namespaceName, err) + return err + } + klog.V(4).Infof("CronFederatedHPA(%s) scales resource(%s/%s) successfully", + c.namespaceName, cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name) + return nil + } + return nil +} + +func (c *CronFederatedHPAJob) addFailedExecutionHistory( + cronFHPA *autoscalingv1alpha1.CronFederatedHPA, errMsg string) error { + _, nextExecutionTime := c.scheduler.NextRun() + + // Add success history record, return false if there is no such rule + addFailedHistoryFunc := func() bool { + exists := false + for index, rule := range cronFHPA.Status.ExecutionHistories { + if rule.RuleName != c.rule.Name { + continue + } + failedExecution := autoscalingv1alpha1.FailedExecution{ + ScheduleTime: rule.NextExecutionTime, + ExecutionTime: &metav1.Time{Time: time.Now()}, + Message: errMsg, + } + historyLimits := helper.GetCronFederatedHPAFailedHistoryLimits(c.rule) + if len(rule.FailedExecutions) > historyLimits-1 { + 
rule.FailedExecutions = rule.FailedExecutions[:historyLimits-1] + } + cronFHPA.Status.ExecutionHistories[index].FailedExecutions = + append([]autoscalingv1alpha1.FailedExecution{failedExecution}, rule.FailedExecutions...) + cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = &metav1.Time{Time: nextExecutionTime} + exists = true + break + } + + return exists + } + + return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) { + // If this history not exist, it means the rule is suspended or deleted, so just ignore it. + if exists := addFailedHistoryFunc(); !exists { + return nil + } + + updateErr := c.client.Status().Update(context.Background(), cronFHPA) + if updateErr == nil { + klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name) + return nil + } + + updated := &autoscalingv1alpha1.CronFederatedHPA{} + if err = c.client.Get(context.Background(), client.ObjectKey{Namespace: cronFHPA.Namespace, Name: cronFHPA.Name}, updated); err == nil { + cronFHPA = updated + } else { + klog.Errorf("Get CronFederatedHPA(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Name, err) + } + return updateErr + }) +} + +func (c *CronFederatedHPAJob) addSuccessExecutionHistory( + cronFHPA *autoscalingv1alpha1.CronFederatedHPA, + appliedReplicas, appliedMaxReplicas, appliedMinReplicas *int32) error { + _, nextExecutionTime := c.scheduler.NextRun() + + // Add success history record, return false if there is no such rule + addSuccessHistoryFunc := func() bool { + exists := false + for index, rule := range cronFHPA.Status.ExecutionHistories { + if rule.RuleName != c.rule.Name { + continue + } + successExecution := autoscalingv1alpha1.SuccessfulExecution{ + ScheduleTime: rule.NextExecutionTime, + ExecutionTime: &metav1.Time{Time: time.Now()}, + AppliedReplicas: appliedReplicas, + AppliedMaxReplicas: appliedMaxReplicas, + AppliedMinReplicas: appliedMinReplicas, + } + historyLimits := 
helper.GetCronFederatedHPASuccessHistoryLimits(c.rule) + if len(rule.SuccessfulExecutions) > historyLimits-1 { + rule.SuccessfulExecutions = rule.SuccessfulExecutions[:historyLimits-1] + } + cronFHPA.Status.ExecutionHistories[index].SuccessfulExecutions = + append([]autoscalingv1alpha1.SuccessfulExecution{successExecution}, rule.SuccessfulExecutions...) + cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = &metav1.Time{Time: nextExecutionTime} + exists = true + break + } + + return exists + } + + return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) { + // If this history not exist, it means the rule deleted, so just ignore it. + if exists := addSuccessHistoryFunc(); !exists { + return nil + } + + updateErr := c.client.Status().Update(context.Background(), cronFHPA) + if updateErr == nil { + klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name) + return err + } + + updated := &autoscalingv1alpha1.CronFederatedHPA{} + if err = c.client.Get(context.Background(), client.ObjectKey{Namespace: cronFHPA.Namespace, Name: cronFHPA.Name}, updated); err == nil { + cronFHPA = updated + } else { + klog.Errorf("Get CronFederatedHPA(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Name, err) + } + return updateErr + }) +} diff --git a/pkg/util/apigroup.go b/pkg/util/apigroup.go index df95354587d1..7ce03948bddd 100644 --- a/pkg/util/apigroup.go +++ b/pkg/util/apigroup.go @@ -8,6 +8,7 @@ import ( eventsv1 "k8s.io/api/events/v1" "k8s.io/apimachinery/pkg/runtime/schema" + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" @@ -44,6 +45,7 @@ func NewSkippedResourceConfig() *SkippedResourceConfig { r.DisableGroup(workv1alpha1.GroupVersion.Group) 
r.DisableGroup(configv1alpha1.GroupVersion.Group) r.DisableGroup(networkingv1alpha1.GroupVersion.Group) + r.DisableGroup(autoscalingv1alpha1.GroupVersion.Group) // disable event by default r.DisableGroup(eventsv1.GroupName) r.DisableGroupVersionKind(corev1EventGVK) diff --git a/pkg/util/helper/cronfederatedhpa.go b/pkg/util/helper/cronfederatedhpa.go new file mode 100644 index 000000000000..2070fbc3b65f --- /dev/null +++ b/pkg/util/helper/cronfederatedhpa.go @@ -0,0 +1,48 @@ +/* +Copyright 2023 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helper + +import ( + "k8s.io/apimachinery/pkg/types" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" +) + +func IsCronFederatedHPARuleSuspend(rule autoscalingv1alpha1.CronFederatedHPARule) bool { + if rule.Suspend == nil { + return false + } + return *rule.Suspend +} + +func GetCronFederatedHPASuccessHistoryLimits(rule autoscalingv1alpha1.CronFederatedHPARule) int { + if rule.SuccessfulHistoryLimit == nil { + return 3 + } + return int(*rule.SuccessfulHistoryLimit) +} + +func GetCronFederatedHPAFailedHistoryLimits(rule autoscalingv1alpha1.CronFederatedHPARule) int { + if rule.FailedHistoryLimit == nil { + return 3 + } + return int(*rule.FailedHistoryLimit) +} + +func GetCronFederatedHPAKey(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) string { + namespacedName := types.NamespacedName{Namespace: cronFHPA.Namespace, Name: cronFHPA.Name} + return namespacedName.String() +} diff --git a/pkg/webhook/cronfederatedhpa/validation.go b/pkg/webhook/cronfederatedhpa/validation.go new file mode 100755 index 000000000000..ccd923e5153d --- /dev/null +++ b/pkg/webhook/cronfederatedhpa/validation.go @@ -0,0 +1,133 @@ +/* +Copyright 2023 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cronfederatedhpa + +import ( + "context" + "fmt" + "net/http" + "time" + _ "time/tzdata" + + "github.com/adhocore/gronx" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" +) + +// ValidatingAdmission validates CronFederatedHPA object when creating/updating. +type ValidatingAdmission struct { + decoder *admission.Decoder +} + +// Check if our ValidatingAdmission implements necessary interface +var _ admission.Handler = &ValidatingAdmission{} +var _ admission.DecoderInjector = &ValidatingAdmission{} + +// Handle implements admission.Handler interface. +// It yields a response to an AdmissionRequest. +func (v *ValidatingAdmission) Handle(_ context.Context, req admission.Request) admission.Response { + cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{} + + err := v.decoder.Decode(req, cronFHPA) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + klog.V(2).Infof("Validating CronFederatedHPA(%s) for request: %s", klog.KObj(cronFHPA).String(), req.Operation) + + if errs := v.validateCronFederatedHPASpec(cronFHPA); len(errs) != 0 { + return admission.Denied(errs.ToAggregate().Error()) + } + + return admission.Allowed("") +} + +// InjectDecoder implements admission.DecoderInjector interface. +// A decoder will be automatically injected. 
+func (v *ValidatingAdmission) InjectDecoder(d *admission.Decoder) error { + v.decoder = d + return nil +} + +// validateCronFederatedHPASpec validates CronFederatedHPA spec +func (v *ValidatingAdmission) validateCronFederatedHPASpec(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) field.ErrorList { + errs := field.ErrorList{} + scaleFHPA := false + + scaleTargetRef := cronFHPA.Spec.ScaleTargetRef + if scaleTargetRef.APIVersion == autoscalingv1alpha1.GroupVersion.String() { + if scaleTargetRef.Kind != autoscalingv1alpha1.FederatedHPAKind { + kindFieldPath := field.NewPath("spec").Child("scaleTargetRef").Child("kind") + fieldError := field.Invalid(kindFieldPath, scaleTargetRef.Kind, + fmt.Sprintf("invalid scaleTargetRef kind: %s, only support %s", scaleTargetRef.Kind, autoscalingv1alpha1.FederatedHPAKind)) + errs = append(errs, fieldError) + return errs + } + scaleFHPA = true + } + + errs = append(errs, v.validateCronFederatedHPARules(cronFHPA.Spec.Rules, scaleFHPA, scaleTargetRef.Kind)...) + + return errs +} + +// validateCronFederatedHPARules validates CronFederatedHPA rules +func (v *ValidatingAdmission) validateCronFederatedHPARules(rules []autoscalingv1alpha1.CronFederatedHPARule, + scaleFHPA bool, scaleTargetKind string) field.ErrorList { + errs := field.ErrorList{} + + ruleFieldPath := field.NewPath("spec").Child("rules") + ruleNameSet := sets.NewString() + for index, rule := range rules { + if ruleNameSet.Has(rule.Name) { + errs = append(errs, field.Duplicate(field.NewPath("spec"). 
+ Child("rules").Index(index).Child("name"), rule.Name)) + } + ruleNameSet.Insert(rule.Name) + + // Validate cron format + cronValidator := gronx.New() + if !cronValidator.IsValid(rule.Schedule) { + errs = append(errs, field.Invalid(ruleFieldPath.Index(index).Child("schedule"), rule.Schedule, "invalid cron format")) + } + + // Validate timezone + if rule.TimeZone != nil { + _, err := time.LoadLocation(*rule.TimeZone) + if err != nil { + errs = append(errs, field.Invalid(ruleFieldPath.Index(index).Child("timeZone"), rule.TimeZone, err.Error())) + } + } + + if scaleFHPA { + // Validate targetMinReplicas and targetMaxReplicas + if rule.TargetMinReplicas == nil && rule.TargetMaxReplicas == nil { + errMsg := "targetMinReplicas and targetMaxReplicas cannot be nil at the same time if you want to scale FederatedHPA" + errs = append(errs, field.Invalid(ruleFieldPath.Index(index), "", errMsg)) + } + continue + } + + // Validate targetReplicas + if rule.TargetReplicas == nil { + errMsg := fmt.Sprintf("targetReplicas cannot be nil if you want to scale %s", scaleTargetKind) + errs = append(errs, field.Invalid(ruleFieldPath.Index(index), "", errMsg)) + } + } + + return errs +} diff --git a/vendor/github.com/adhocore/gronx/.editorconfig b/vendor/github.com/adhocore/gronx/.editorconfig new file mode 100644 index 000000000000..e0e124aee1d9 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.go] +indent_style = tab +tab_width = 2 diff --git a/vendor/github.com/adhocore/gronx/.gitignore b/vendor/github.com/adhocore/gronx/.gitignore new file mode 100644 index 000000000000..12ae865c861f --- /dev/null +++ b/vendor/github.com/adhocore/gronx/.gitignore @@ -0,0 +1,11 @@ +.idea/ +.DS_Store +*~ +*.out +vendor/ +dist/ +.env +bin/ +*.php +test/*.go +*.txt diff --git 
a/vendor/github.com/adhocore/gronx/.goreleaser.yml b/vendor/github.com/adhocore/gronx/.goreleaser.yml new file mode 100644 index 000000000000..b52f6d99703e --- /dev/null +++ b/vendor/github.com/adhocore/gronx/.goreleaser.yml @@ -0,0 +1,67 @@ +project_name: tasker + +release: + prerelease: auto + name_template: "Version v{{.Version}}" + # draft: true + mode: "keep-existing" + +before: + hooks: + - go mod tidy + +builds: + - <<: &build_defaults + binary: bin/tasker + main: ./cmd/tasker + ldflags: + - -X main.Version={{.Version}} + env: + - CGO_ENABLED=0 + id: macOS + goos: [darwin] + goarch: [amd64, arm64] + + - <<: *build_defaults + id: linux + goos: [linux] + goarch: [386, arm, amd64, arm64] + + - <<: *build_defaults + id: windows + goos: [windows] + goarch: [amd64] + +archives: + - id: nix + builds: [macOS, linux] + <<: &archive_defaults + name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + wrap_in_directory: true + rlcp: true + format: tar.gz + files: + - LICENSE + + - id: windows + builds: [windows] + <<: *archive_defaults + wrap_in_directory: false + format: zip + files: + - LICENSE + +checksum: + name_template: 'checksums.txt' + algorithm: sha256 + +changelog: + skip: true + use: github + sort: desc + filters: + exclude: + - '^doc:' + - '^dev:' + - '^build:' + - '^ci:' diff --git a/vendor/github.com/adhocore/gronx/CHANGELOG.md b/vendor/github.com/adhocore/gronx/CHANGELOG.md new file mode 100644 index 000000000000..9224d685e909 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/CHANGELOG.md @@ -0,0 +1,119 @@ +## [v0.2.7](https://github.com/adhocore/gronx/releases/tag/v0.2.7) (2022-06-28) + +### Miscellaneous +- **Workflow**: Run tests on 1.18x (Jitendra) +- Tests for go v1.17.x, add codecov (Jitendra) + + +## [v0.2.6](https://github.com/adhocore/gronx/releases/tag/v0.2.6) (2021-10-14) + +### Miscellaneous +- Fix 'with' languages (Jitendra Adhikari) 
[_a813b55_](https://github.com/adhocore/gronx/commit/a813b55) +- Init/setup github codeql (Jitendra Adhikari) [_fe2aa5a_](https://github.com/adhocore/gronx/commit/fe2aa5a) + + +## [v0.2.5](https://github.com/adhocore/gronx/releases/tag/v0.2.5) (2021-07-25) + +### Bug Fixes +- **Tasker**: The clause should be using OR (Jitendra Adhikari) [_b813b85_](https://github.com/adhocore/gronx/commit/b813b85) + + +## [v0.2.4](https://github.com/adhocore/gronx/releases/tag/v0.2.4) (2021-05-05) + +### Features +- **Pkg.tasker**: Capture cmd output in tasker logger, error in stderr (Jitendra Adhikari) [_0da0aae_](https://github.com/adhocore/gronx/commit/0da0aae) + +### Internal Refactors +- **Cmd.tasker**: Taskify is now method of tasker (Jitendra Adhikari) [_8b1373b_](https://github.com/adhocore/gronx/commit/8b1373b) + + +## [v0.2.3](https://github.com/adhocore/gronx/releases/tag/v0.2.3) (2021-05-04) + +### Bug Fixes +- **Pkg.tasker**: Sleep 100ms so abort can be bailed asap, remove dup msg (Jitendra Adhikari) [_d868920_](https://github.com/adhocore/gronx/commit/d868920) + +### Miscellaneous +- Allow leeway period at the end (Jitendra Adhikari) [_5ebf923_](https://github.com/adhocore/gronx/commit/5ebf923) + + +## [v0.2.2](https://github.com/adhocore/gronx/releases/tag/v0.2.2) (2021-05-03) + +### Bug Fixes +- **Pkg.tasker**: DoRun checks if timed out before run (Jitendra Adhikari) [_f27a657_](https://github.com/adhocore/gronx/commit/f27a657) + +### Internal Refactors +- **Pkg.tasker**: Use dateFormat var, update final tick phrase (Jitendra Adhikari) [_fad0271_](https://github.com/adhocore/gronx/commit/fad0271) + + +## [v0.2.1](https://github.com/adhocore/gronx/releases/tag/v0.2.1) (2021-05-02) + +### Bug Fixes +- **Pkg.tasker**: Deprecate sleep dur if next tick timeout (Jitendra Adhikari) [_3de45a1_](https://github.com/adhocore/gronx/commit/3de45a1) + + +## [v0.2.0](https://github.com/adhocore/gronx/releases/tag/v0.2.0) (2021-05-02) + +### Features +- **Cmd.tasker**: Add tasker 
for standalone usage as task daemon (Jitendra Adhikari) [_0d99409_](https://github.com/adhocore/gronx/commit/0d99409) +- **Pkg.tasker**: Add parser for tasker pkg (Jitendra Adhikari) [_e7f1811_](https://github.com/adhocore/gronx/commit/e7f1811) +- **Pkg.tasker**: Add tasker pkg (Jitendra Adhikari) [_a57b1c4_](https://github.com/adhocore/gronx/commit/a57b1c4) + +### Bug Fixes +- **Pkg.tasker**: Use log.New() instead (Jitendra Adhikari) [_0cf2c07_](https://github.com/adhocore/gronx/commit/0cf2c07) +- **Validator**: This check is not really required (Jitendra Adhikari) [_c3d75e3_](https://github.com/adhocore/gronx/commit/c3d75e3) + +### Internal Refactors +- **Gronx**: Add public methods for internal usage, expose spaceRe (Jitendra Adhikari) [_94eb20b_](https://github.com/adhocore/gronx/commit/94eb20b) + +### Miscellaneous +- **Pkg.tasker**: Use file perms as octal (Jitendra Adhikari) [_83f258d_](https://github.com/adhocore/gronx/commit/83f258d) +- **Workflow**: Include all tests in action (Jitendra Adhikari) [_7328cbf_](https://github.com/adhocore/gronx/commit/7328cbf) + +### Documentations +- Add task mangager and tasker docs/usages (Jitendra Adhikari) [_e77aa5f_](https://github.com/adhocore/gronx/commit/e77aa5f) + + +## [v0.1.4](https://github.com/adhocore/gronx/releases/tag/v0.1.4) (2021-04-25) + +### Miscellaneous +- **Mod**: 1.13 is okay too (Jitendra Adhikari) [_6c328e7_](https://github.com/adhocore/gronx/commit/6c328e7) +- Try go 1.13.x (Jitendra Adhikari) [_b017ec4_](https://github.com/adhocore/gronx/commit/b017ec4) + +### Documentations +- Practical usage (Jitendra Adhikari) [_9572e61_](https://github.com/adhocore/gronx/commit/9572e61) + + +## [v0.1.3](https://github.com/adhocore/gronx/releases/tag/v0.1.3) (2021-04-22) + +### Internal Refactors +- **Checker**: Preserve error, for pos 2 & 4 bail only on due or err (Jitendra Adhikari) [_39a9cd5_](https://github.com/adhocore/gronx/commit/39a9cd5) +- **Validator**: Do not discard error from strconv (Jitendra 
Adhikari) [_3b0f444_](https://github.com/adhocore/gronx/commit/3b0f444) + + +## [v0.1.2](https://github.com/adhocore/gronx/releases/tag/v0.1.2) (2021-04-21) + +### Features +- Add IsValid() (Jitendra Adhikari) [_150687b_](https://github.com/adhocore/gronx/commit/150687b) + +### Documentations +- IsValid usage (Jitendra Adhikari) [_b747116_](https://github.com/adhocore/gronx/commit/b747116) + + +## [v0.1.1](https://github.com/adhocore/gronx/releases/tag/v0.1.1) (2021-04-21) + +### Features +- Add main gronx api (Jitendra Adhikari) [_1b3b108_](https://github.com/adhocore/gronx/commit/1b3b108) +- Add cron segment checker (Jitendra Adhikari) [_a56be7c_](https://github.com/adhocore/gronx/commit/a56be7c) +- Add validator (Jitendra Adhikari) [_455a024_](https://github.com/adhocore/gronx/commit/455a024) + +### Miscellaneous +- **Workflow**: Update actions (Jitendra Adhikari) [_8b54cc3_](https://github.com/adhocore/gronx/commit/8b54cc3) +- Init module (Jitendra Adhikari) [_bada37d_](https://github.com/adhocore/gronx/commit/bada37d) +- Add license (Jitendra Adhikari) [_5f20b96_](https://github.com/adhocore/gronx/commit/5f20b96) +- **Gh**: Add meta files (Jitendra Adhikari) [_35a1310_](https://github.com/adhocore/gronx/commit/35a1310) +- **Workflow**: Add lint/test actions (Jitendra Adhikari) [_884d5cb_](https://github.com/adhocore/gronx/commit/884d5cb) +- Add editorconfig (Jitendra Adhikari) [_8b75494_](https://github.com/adhocore/gronx/commit/8b75494) + +### Documentations +- On cron expressions (Jitendra Adhikari) [_547fd72_](https://github.com/adhocore/gronx/commit/547fd72) +- Add readme (Jitendra Adhikari) [_3955e88_](https://github.com/adhocore/gronx/commit/3955e88) diff --git a/vendor/github.com/adhocore/gronx/LICENSE b/vendor/github.com/adhocore/gronx/LICENSE new file mode 100644 index 000000000000..f114876442ae --- /dev/null +++ b/vendor/github.com/adhocore/gronx/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021-2099 Jitendra Adhikari + +Permission is 
hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/adhocore/gronx/README.md b/vendor/github.com/adhocore/gronx/README.md new file mode 100644 index 000000000000..6093eb44a65d --- /dev/null +++ b/vendor/github.com/adhocore/gronx/README.md @@ -0,0 +1,328 @@ +# adhocore/gronx + +[![Latest Version](https://img.shields.io/github/release/adhocore/gronx.svg?style=flat-square)](https://github.com/adhocore/gronx/releases) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square)](LICENSE) +[![Go Report](https://goreportcard.com/badge/github.com/adhocore/gronx)](https://goreportcard.com/report/github.com/adhocore/gronx) +[![Test](https://github.com/adhocore/gronx/actions/workflows/test-action.yml/badge.svg)](https://github.com/adhocore/gronx/actions/workflows/test-action.yml) +[![Lint](https://github.com/adhocore/gronx/actions/workflows/lint-action.yml/badge.svg)](https://github.com/adhocore/gronx/actions/workflows/lint-action.yml) +[![Codecov](https://img.shields.io/codecov/c/github/adhocore/gronx/main.svg?style=flat-square)](https://codecov.io/gh/adhocore/gronx) +[![Support](https://img.shields.io/static/v1?label=Support&message=%E2%9D%A4&logo=GitHub)](https://github.com/sponsors/adhocore) +[![Tweet](https://img.shields.io/twitter/url/http/shields.io.svg?style=social)](https://twitter.com/intent/tweet?text=Lightweight+fast+and+deps+free+cron+expression+parser+for+Golang&url=https://github.com/adhocore/gronx&hashtags=go,golang,parser,cron,cronexpr,cronparser) + +`gronx` is Golang [cron expression](#cron-expression) parser ported from [adhocore/cron-expr](https://github.com/adhocore/php-cron-expr) with task runner +and daemon that supports crontab like task list file. Use it programatically in Golang or as standalone binary instead of crond. + +- Zero dependency. +- Very **fast** because it bails early in case a segment doesn't match. +- Built in crontab like daemon. +- Supports time granularity of Seconds. 
+ +Find gronx in [pkg.go.dev](https://pkg.go.dev/github.com/adhocore/gronx). + +## Installation + +```sh +go get -u github.com/adhocore/gronx +``` + +## Usage + +```go +import ( + "time" + + "github.com/adhocore/gronx" +) + +gron := gronx.New() +expr := "* * * * *" + +// check if expr is even valid, returns bool +gron.IsValid(expr) // true + +// check if expr is due for current time, returns bool and error +gron.IsDue(expr) // true|false, nil + +// check if expr is due for given time +gron.IsDue(expr, time.Date(2021, time.April, 1, 1, 1, 0, 0, time.UTC)) // true|false, nil +``` + +### Batch Due Check + +If you have multiple cron expressions to check due on same reference time use `BatchDue()`: +```go +gron := gronx.New() +exprs := []string{"* * * * *", "0 */5 * * * *"} + +// gives []gronx.Expr{} array, each item has Due flag and Err enountered. +dues := gron.BatchDue(exprs) + +for _, expr := range dues { + if expr.Err != nil { + // Handle err + } else if expr.Due { + // Handle due + } +} + +// Or with given time +ref := time.Now() +gron.BatchDue(exprs, ref) +``` + +### Next Tick + +To find out when is the cron due next (in near future): +```go +allowCurrent = true // includes current time as well +nextTime, err := gron.NextTick(expr, allowCurrent) // gives time.Time, error + +// OR, next tick after certain reference time +refTime = time.Date(2022, time.November, 1, 1, 1, 0, 0, time.UTC) +allowCurrent = false // excludes the ref time +nextTime, err := gron.NextTickAfter(expr, refTime, allowCurrent) // gives time.Time, error +``` + +### Prev Tick + +To find out when was the cron due previously (in near past): +```go +allowCurrent = true // includes current time as well +prevTime, err := gron.PrevTick(expr, allowCurrent) // gives time.Time, error + +// OR, prev tick before certain reference time +refTime = time.Date(2022, time.November, 1, 1, 1, 0, 0, time.UTC) +allowCurrent = false // excludes the ref time +nextTime, err := gron.PrevTickBefore(expr, refTime, 
allowCurrent) // gives time.Time, error +``` + +> The working of `PrevTick*()` and `NextTick*()` are mostly the same except the direction. +> They differ in lookback or lookahead. + +### Standalone Daemon + +In a more practical level, you would use this tool to manage and invoke jobs in app itself and not +mess around with `crontab` for each and every new tasks/jobs. + +In crontab just put one entry with `* * * * *` which points to your Go entry point that uses this tool. +Then in that entry point you would invoke different tasks if the corresponding Cron expr is due. +Simple map structure would work for this. + +Check the section below for more sophisticated way of managing tasks automatically using `gronx` daemon called `tasker`. + +--- +### Go Tasker + +Tasker is a task manager that can be programatically used in Golang applications. It runs as a daemon and invokes tasks scheduled with cron expression: +```go +package main + +import ( + "context" + "time" + + "github.com/adhocore/gronx/pkg/tasker" +) + +func main() { + taskr := tasker.New(tasker.Option{ + Verbose: true, + // optional: defaults to local + Tz: "Asia/Bangkok", + // optional: defaults to stderr log stream + Out: "/full/path/to/output-file", + }) + + // add task to run every minute + taskr.Task("* * * * *", func(ctx context.Context) (int, error) { + // do something ... 
+ + // then return exit code and error, for eg: if everything okay + return 0, nil + }).Task("*/5 * * * *", func(ctx context.Context) (int, error) { // every 5 minutes + // you can also log the output to Out file as configured in Option above: + taskr.Log.Printf("done something in %d s", 2) + + return 0, nil + }) + + // run task without overlap, set concurrent flag to false: + concurrent := false + taskr.Task("* * * * * *", , tasker.Taskify("sleep 2", tasker.Option{}), concurrent) + + // every 10 minute with arbitrary command + taskr.Task("@10minutes", taskr.Taskify("command --option val -- args", tasker.Option{Shell: "/bin/sh -c"})) + + // ... add more tasks + + // optionally if you want tasker to stop after 2 hour, pass the duration with Until(): + taskr.Until(2 * time.Hour) + + // finally run the tasker, it ticks sharply on every minute and runs all the tasks due on that time! + // it exits gracefully when ctrl+c is received making sure pending tasks are completed. + taskr.Run() +} +``` + +#### Concurrency + +By default the tasks can run concurrently i.e if previous run is still not finished +but it is now due again, it will run again. +If you want to run only one instance of a task at a time, set concurrent flag to false: + +```go +taskr := tasker.New(tasker.Option{}) + +concurrent := false +expr, task := "* * * * * *", tasker.Taskify("php -r 'sleep(2);'") +taskr.Task(expr, task, concurrent) +``` + +### Task Daemon + +It can also be used as standalone task daemon instead of programmatic usage for Golang application. + +First, just install tasker command: +```sh +go install github.com/adhocore/gronx/cmd/tasker@latest +``` + +Or you can also download latest prebuilt binary from [release](https://github.com/adhocore/gronx/releases/latest) for platform of your choice. + +Then prepare a taskfile ([example](./tests/../test/taskfile.txt)) in crontab format +(or can even point to existing crontab). 
+> `user` is not supported: it is just cron expr followed by the command. + +Finally run the task daemon like so +``` +tasker -file path/to/taskfile +``` +> You can pass more options to control the behavior of task daemon, see below. + +#### Tasker command options: + +```txt +-file string + The task file in crontab format +-out string + The fullpath to file where output from tasks are sent to +-shell string + The shell to use for running tasks (default "/usr/bin/bash") +-tz string + The timezone to use for tasks (default "Local") +-until int + The timeout for task daemon in minutes +-verbose + The verbose mode outputs as much as possible +``` + +Examples: +```sh +tasker -verbose -file path/to/taskfile -until 120 # run until next 120min (i.e 2hour) with all feedbacks echoed back +tasker -verbose -file path/to/taskfile -out path/to/output # with all feedbacks echoed to the output file +tasker -tz America/New_York -file path/to/taskfile -shell zsh # run all tasks using zsh shell based on NY timezone +``` + +> File extension of taskfile for (`-file` option) does not matter: can be any or none. +> The directory for outfile (`-out` option) must exist, file is created by task daemon. + +> Same timezone applies for all tasks currently and it might support overriding timezone per task in future release. + +#### Notes on Windows + +In Windows if it doesn't find `bash.exe` or `git-bash.exe` it will use `powershell`. +`powershell` may not be compatible with Unix flavored commands. Also to note: +you can't do chaining with `cmd1 && cmd2` but rather `cmd1 ; cmd2`. + +--- +### Cron Expression + +A complete cron expression consists of 7 segments viz: +``` + +``` + +However only 5 will do and this is most commonly used. 5 segments are interpreted as: +``` + +``` +in which case a default value of 0 is prepended for `` position. 
+ +In a 6 segments expression, if 6th segment matches `` (i.e 4 digits at least) it will be interpreted as: +``` + +``` +and a default value of 0 is prepended for `` position. + +For each segments you can have **multiple choices** separated by comma: +> Eg: `0 0,30 * * * *` means either 0th or 30th minute. + +To specify **range of values** you can use dash: +> Eg: `0 10-15 * * * *` means 10th, 11th, 12th, 13th, 14th and 15th minute. + +To specify **range of step** you can combine a dash and slash: +> Eg: `0 10-15/2 * * * *` means every 2 minutes between 10 and 15 i.e 10th, 12th and 14th minute. + +For the `` and `` segment, there are additional [**modifiers**](#modifiers) (optional). + +And if you want, you can mix the multiple choices, ranges and steps in a single expression: +> `0 5,12-20/4,55 * * * *` matches if any one of `5` or `12-20/4` or `55` matches the minute. + +### Real Abbreviations + +You can use real abbreviations (3 chars) for month and week days. eg: `JAN`, `dec`, `fri`, `SUN` + +### Tags + +Following tags are available and they are converted to real cron expressions before parsing: + +- *@yearly* or *@annually* - every year +- *@monthly* - every month +- *@daily* - every day +- *@weekly* - every week +- *@hourly* - every hour +- *@5minutes* - every 5 minutes +- *@10minutes* - every 10 minutes +- *@15minutes* - every 15 minutes +- *@30minutes* - every 30 minutes +- *@always* - every minute +- *@everysecond* - every second + +> For BC reasons, `@always` still means every minute for now, in future release it may mean every seconds instead. 
+ +```go +// Use tags like so: +gron.IsDue("@hourly") +gron.IsDue("@5minutes") +``` + +### Modifiers + +Following modifiers supported + +- *Day of Month / 3rd of 5 segments / 4th of 6+ segments:* + - `L` stands for last day of month (eg: `L` could mean 29th for February in leap year) + - `W` stands for closest week day (eg: `10W` is closest week days (MON-FRI) to 10th date) +- *Day of Week / 5th of 5 segments / 6th of 6+ segments:* + - `L` stands for last weekday of month (eg: `2L` is last monday) + - `#` stands for nth day of week in the month (eg: `1#2` is second sunday) + +--- +## License + +> © [MIT](./LICENSE) | 2021-2099, Jitendra Adhikari + +## Credits + +This project is ported from [adhocore/cron-expr](https://github.com/adhocore/php-cron-expr) and +release managed by [please](https://github.com/adhocore/please). + +--- +### Other projects + +My other golang projects you might find interesting and useful: + +- [**urlsh**](https://github.com/adhocore/urlsh) - URL shortener and bookmarker service with UI, API, Cache, Hits Counter and forwarder using postgres and redis in backend, bulma in frontend; has [web](https://urlssh.xyz) and cli client +- [**fast**](https://github.com/adhocore/fast) - Check your internet speed with ease and comfort right from the terminal +- [**goic**](https://github.com/adhocore/goic) - Go Open ID Connect, is OpenID connect client library for Golang, supports the Authorization Code Flow of OpenID Connect specification. +- [**chin**](https://github.com/adhocore/chin) - A Go lang command line tool to show a spinner as user waits for some long running jobs to finish. 
diff --git a/vendor/github.com/adhocore/gronx/VERSION b/vendor/github.com/adhocore/gronx/VERSION new file mode 100644 index 000000000000..34707cbb1632 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/VERSION @@ -0,0 +1 @@ +v0.2.7 diff --git a/vendor/github.com/adhocore/gronx/batch.go b/vendor/github.com/adhocore/gronx/batch.go new file mode 100644 index 000000000000..63d85ec2d0ac --- /dev/null +++ b/vendor/github.com/adhocore/gronx/batch.go @@ -0,0 +1,51 @@ +package gronx + +import ( + "strings" + "time" +) + +// Expr represents an item in array for batch check +type Expr struct { + Expr string + Due bool + Err error +} + +// BatchDue checks if multiple expressions are due for given time (or now). +// It returns []Expr with filled in Due and Err values. +func (g *Gronx) BatchDue(exprs []string, ref ...time.Time) []Expr { + ref = append(ref, time.Now()) + g.C.SetRef(ref[0]) + + var segs []string + + cache, batch := map[string]Expr{}, make([]Expr, len(exprs)) + for i := range exprs { + batch[i].Expr = exprs[i] + segs, batch[i].Err = Segments(exprs[i]) + key := strings.Join(segs, " ") + if batch[i].Err != nil { + cache[key] = batch[i] + continue + } + + if c, ok := cache[key]; ok { + batch[i] = c + batch[i].Expr = exprs[i] + continue + } + + due := true + for pos, seg := range segs { + if seg != "*" && seg != "?" { + if due, batch[i].Err = g.C.CheckDue(seg, pos); !due || batch[i].Err != nil { + break + } + } + } + batch[i].Due = due + cache[key] = batch[i] + } + return batch +} diff --git a/vendor/github.com/adhocore/gronx/checker.go b/vendor/github.com/adhocore/gronx/checker.go new file mode 100644 index 000000000000..78fd0cca981f --- /dev/null +++ b/vendor/github.com/adhocore/gronx/checker.go @@ -0,0 +1,131 @@ +package gronx + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Checker is interface for cron segment due check. 
+type Checker interface { + GetRef() time.Time + SetRef(ref time.Time) + CheckDue(segment string, pos int) (bool, error) +} + +// SegmentChecker is factory implementation of Checker. +type SegmentChecker struct { + ref time.Time +} + +// GetRef returns the current reference time +func (c *SegmentChecker) GetRef() time.Time { + return c.ref +} + +// SetRef sets the reference time for which to check if a cron expression is due. +func (c *SegmentChecker) SetRef(ref time.Time) { + c.ref = ref +} + +// CheckDue checks if the cron segment at given position is due. +// It returns bool or error if any. +func (c *SegmentChecker) CheckDue(segment string, pos int) (due bool, err error) { + ref, last := c.GetRef(), -1 + val, loc := valueByPos(ref, pos), ref.Location() + isMonth, isWeekDay := pos == 3, pos == 5 + + for _, offset := range strings.Split(segment, ",") { + mod := (isMonth || isWeekDay) && strings.ContainsAny(offset, "LW#") + if due, err = c.isOffsetDue(offset, val, pos); due || (!mod && err != nil) { + return + } + if !mod { + continue + } + if last == -1 { + last = time.Date(ref.Year(), ref.Month(), 1, 0, 0, 0, 0, loc).AddDate(0, 1, 0).Add(-time.Nanosecond).Day() + } + if isMonth { + due, err = isValidMonthDay(offset, last, ref) + } else if isWeekDay { + due, err = isValidWeekDay(offset, last, ref) + } + if due || err != nil { + return due, err + } + } + + return false, nil +} + +func (c *SegmentChecker) isOffsetDue(offset string, val, pos int) (bool, error) { + if offset == "*" || offset == "?" 
{ + return true, nil + } + + bounds, isWeekDay := boundsByPos(pos), pos == 5 + if strings.Contains(offset, "/") { + return inStep(val, offset, bounds) + } + if strings.Contains(offset, "-") { + if isWeekDay { + offset = strings.Replace(offset, "7-", "0-", 1) + } + return inRange(val, offset, bounds) + } + + if !isWeekDay && (val == 0 || offset == "0") { + return offset == "0" && val == 0, nil + } + + nval, err := strconv.Atoi(offset) + if err != nil { + return false, err + } + if nval < bounds[0] || nval > bounds[1] { + return false, fmt.Errorf("segment#%d: '%s' out of bounds(%d, %d)", pos, offset, bounds[0], bounds[1]) + } + + return nval == val || (isWeekDay && nval == 7 && val == 0), nil +} + +func valueByPos(ref time.Time, pos int) (val int) { + switch pos { + case 0: + val = ref.Second() + case 1: + val = ref.Minute() + case 2: + val = ref.Hour() + case 3: + val = ref.Day() + case 4: + val = int(ref.Month()) + case 5: + val = int(ref.Weekday()) + case 6: + val = ref.Year() + } + return +} + +func boundsByPos(pos int) (bounds []int) { + bounds = []int{0, 0} + switch pos { + case 0, 1: + bounds = []int{0, 59} + case 2: + bounds = []int{0, 23} + case 3: + bounds = []int{1, 31} + case 4: + bounds = []int{1, 12} + case 5: + bounds = []int{0, 7} + case 6: + bounds = []int{1, 9999} + } + return +} diff --git a/vendor/github.com/adhocore/gronx/gronx.go b/vendor/github.com/adhocore/gronx/gronx.go new file mode 100644 index 000000000000..f86f904d5167 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/gronx.go @@ -0,0 +1,128 @@ +package gronx + +import ( + "errors" + "regexp" + "strings" + "time" +) + +var literals = strings.NewReplacer( + "SUN", "0", "MON", "1", "TUE", "2", "WED", "3", "THU", "4", "FRI", "5", "SAT", "6", + "JAN", "1", "FEB", "2", "MAR", "3", "APR", "4", "MAY", "5", "JUN", "6", "JUL", "7", + "AUG", "8", "SEP", "9", "OCT", "10", "NOV", "11", "DEC", "12", +) + +var expressions = map[string]string{ + "@yearly": "0 0 1 1 *", + "@annually": "0 0 1 1 *", + 
"@monthly": "0 0 1 * *", + "@weekly": "0 0 * * 0", + "@daily": "0 0 * * *", + "@hourly": "0 * * * *", + "@always": "* * * * *", + "@5minutes": "*/5 * * * *", + "@10minutes": "*/10 * * * *", + "@15minutes": "*/15 * * * *", + "@30minutes": "0,30 * * * *", + + "@everysecond": "* * * * * *", +} + +// SpaceRe is regex for whitespace. +var SpaceRe = regexp.MustCompile(`\s+`) +var yearRe = regexp.MustCompile(`\d{4}`) + +func normalize(expr string) []string { + expr = strings.Trim(expr, " \t") + if e, ok := expressions[strings.ToLower(expr)]; ok { + expr = e + } + + expr = SpaceRe.ReplaceAllString(expr, " ") + expr = literals.Replace(strings.ToUpper(expr)) + + return strings.Split(strings.ReplaceAll(expr, " ", " "), " ") +} + +// Gronx is the main program. +type Gronx struct { + C Checker +} + +// New initializes Gronx with factory defaults. +func New() Gronx { + return Gronx{&SegmentChecker{}} +} + +// IsDue checks if cron expression is due for given reference time (or now). +// It returns bool or error if any. +func (g *Gronx) IsDue(expr string, ref ...time.Time) (bool, error) { + ref = append(ref, time.Now()) + g.C.SetRef(ref[0]) + + segs, err := Segments(expr) + if err != nil { + return false, err + } + + return g.SegmentsDue(segs) +} + +func (g *Gronx) isDue(expr string, ref time.Time) bool { + due, err := g.IsDue(expr, ref) + return err == nil && due +} + +// Segments splits expr into array array of cron parts. +// If expression contains 5 parts or 6th part is year like, it prepends a second. +// It returns array or error. +func Segments(expr string) ([]string, error) { + segs := normalize(expr) + slen := len(segs) + if slen < 5 || slen > 7 { + return []string{}, errors.New("expr should contain 5-7 segments separated by space") + } + + // Prepend second if required + prepend := slen == 5 || (slen == 6 && yearRe.MatchString(segs[5])) + if prepend { + segs = append([]string{"0"}, segs...) + } + + return segs, nil +} + +// SegmentsDue checks if all cron parts are due. 
+// It returns bool. You should use IsDue(expr) instead. +func (g *Gronx) SegmentsDue(segs []string) (bool, error) { + for pos, seg := range segs { + if seg == "*" || seg == "?" { + continue + } + + if due, err := g.C.CheckDue(seg, pos); !due { + return due, err + } + } + + return true, nil +} + +// IsValid checks if cron expression is valid. +// It returns bool. +func (g *Gronx) IsValid(expr string) bool { + segs, err := Segments(expr) + if err != nil { + return false + } + + g.C.SetRef(time.Now()) + for pos, seg := range segs { + if _, err := g.C.CheckDue(seg, pos); err != nil { + return false + } + } + + return true +} diff --git a/vendor/github.com/adhocore/gronx/next.go b/vendor/github.com/adhocore/gronx/next.go new file mode 100644 index 000000000000..9b940ffb549a --- /dev/null +++ b/vendor/github.com/adhocore/gronx/next.go @@ -0,0 +1,135 @@ +package gronx + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" +) + +// CronDateFormat is Y-m-d H:i (seconds are not significant) +const CronDateFormat = "2006-01-02 15:04" + +// FullDateFormat is Y-m-d H:i:s (with seconds) +const FullDateFormat = "2006-01-02 15:04:05" + +// NextTick gives next run time from now +func NextTick(expr string, inclRefTime bool) (time.Time, error) { + return NextTickAfter(expr, time.Now(), inclRefTime) +} + +// NextTickAfter gives next run time from the provided time.Time +func NextTickAfter(expr string, start time.Time, inclRefTime bool) (time.Time, error) { + gron, next := New(), start.Truncate(time.Second) + due, err := gron.IsDue(expr, start) + if err != nil || (due && inclRefTime) { + return start, err + } + + segments, _ := Segments(expr) + if len(segments) > 6 && isUnreachableYear(segments[6], next, inclRefTime, false) { + return next, fmt.Errorf("unreachable year segment: %s", segments[6]) + } + + next, err = loop(gron, segments, next, inclRefTime, false) + // Ignore superfluous err + if err != nil && gron.isDue(expr, next) { + err = nil + } + return next, 
err +} + +func loop(gron Gronx, segments []string, start time.Time, incl bool, reverse bool) (next time.Time, err error) { + iter, next, bumped := 500, start, false +over: + for iter > 0 { + iter-- + for pos, seg := range segments { + if seg == "*" || seg == "?" { + continue + } + if next, bumped, err = bumpUntilDue(gron.C, seg, pos, next, reverse); bumped { + goto over + } + } + if !incl && next.Format(FullDateFormat) == start.Format(FullDateFormat) { + delta := time.Second + if reverse { + delta = -time.Second + } + next, _, err = bumpUntilDue(gron.C, segments[0], 0, next.Add(delta), reverse) + continue + } + return + } + return start, errors.New("tried so hard") +} + +var dashRe = regexp.MustCompile(`/.*$`) + +func isUnreachableYear(year string, ref time.Time, incl bool, reverse bool) bool { + if year == "*" || year == "?" { + return false + } + + edge, inc := ref.Year(), 1 + if !incl { + if reverse { + inc = -1 + } + edge += inc + } + for _, offset := range strings.Split(year, ",") { + if strings.Index(offset, "*/") == 0 || strings.Index(offset, "0/") == 0 { + return false + } + for _, part := range strings.Split(dashRe.ReplaceAllString(offset, ""), "-") { + val, err := strconv.Atoi(part) + if err != nil || (!reverse && val >= edge) || (reverse && val < edge) { + return false + } + } + } + return true +} + +var limit = map[int]int{0: 60, 1: 60, 2: 24, 3: 31, 4: 12, 5: 366, 6: 100} + +func bumpUntilDue(c Checker, segment string, pos int, ref time.Time, reverse bool) (time.Time, bool, error) { + // + iter := limit[pos] + for iter > 0 { + c.SetRef(ref) + if ok, _ := c.CheckDue(segment, pos); ok { + return ref, iter != limit[pos], nil + } + ref = bump(ref, pos, reverse) + iter-- + } + return ref, false, errors.New("tried so hard") +} + +func bump(ref time.Time, pos int, reverse bool) time.Time { + factor := 1 + if reverse { + factor = -1 + } + + switch pos { + case 0: + ref = ref.Add(time.Duration(factor) * time.Second) + case 1: + ref = 
ref.Add(time.Duration(factor) * time.Minute) + case 2: + ref = ref.Add(time.Duration(factor) * time.Hour) + case 3, 5: + ref = ref.AddDate(0, 0, factor) + case 4: + ref = ref.AddDate(0, factor, 0) + case 6: + ref = ref.AddDate(factor, 0, 0) + } + return ref +} diff --git a/vendor/github.com/adhocore/gronx/prev.go b/vendor/github.com/adhocore/gronx/prev.go new file mode 100644 index 000000000000..d900bf73938e --- /dev/null +++ b/vendor/github.com/adhocore/gronx/prev.go @@ -0,0 +1,32 @@ +package gronx + +import ( + "fmt" + "time" +) + +// PrevTick gives previous run time before now +func PrevTick(expr string, inclRefTime bool) (time.Time, error) { + return PrevTickBefore(expr, time.Now(), inclRefTime) +} + +// PrevTickBefore gives previous run time before given reference time +func PrevTickBefore(expr string, start time.Time, inclRefTime bool) (time.Time, error) { + gron, prev := New(), start.Truncate(time.Second) + due, err := gron.IsDue(expr, start) + if err != nil || (due && inclRefTime) { + return prev, err + } + + segments, _ := Segments(expr) + if len(segments) > 6 && isUnreachableYear(segments[6], prev, inclRefTime, true) { + return prev, fmt.Errorf("unreachable year segment: %s", segments[6]) + } + + prev, err = loop(gron, segments, prev, inclRefTime, true) + // Ignore superfluous err + if err != nil && gron.isDue(expr, prev) { + err = nil + } + return prev, err +} diff --git a/vendor/github.com/adhocore/gronx/validator.go b/vendor/github.com/adhocore/gronx/validator.go new file mode 100644 index 000000000000..d772ad7fc592 --- /dev/null +++ b/vendor/github.com/adhocore/gronx/validator.go @@ -0,0 +1,146 @@ +package gronx + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +func inStep(val int, s string, bounds []int) (bool, error) { + parts := strings.Split(s, "/") + step, err := strconv.Atoi(parts[1]) + if err != nil { + return false, err + } + if step == 0 { + return false, errors.New("step can't be 0") + } + + if strings.Index(s, "*/") == 0 
|| strings.Index(s, "0/") == 0 { + return val%step == 0, nil + } + + sub, end := strings.Split(parts[0], "-"), val + start, err := strconv.Atoi(sub[0]) + if err != nil { + return false, err + } + + if len(sub) > 1 { + end, err = strconv.Atoi(sub[1]) + if err != nil { + return false, err + } + } + + if (len(sub) > 1 && end < start) || start < bounds[0] || end > bounds[1] { + return false, fmt.Errorf("step '%s' out of bounds(%d, %d)", parts[0], bounds[0], bounds[1]) + } + + return inStepRange(val, start, end, step), nil +} + +func inRange(val int, s string, bounds []int) (bool, error) { + parts := strings.Split(s, "-") + start, err := strconv.Atoi(parts[0]) + if err != nil { + return false, err + } + + end, err := strconv.Atoi(parts[1]) + if err != nil { + return false, err + } + + if end < start || start < bounds[0] || end > bounds[1] { + return false, fmt.Errorf("range '%s' out of bounds(%d, %d)", s, bounds[0], bounds[1]) + } + + return start <= val && val <= end, nil +} + +func inStepRange(val, start, end, step int) bool { + for i := start; i <= end && i <= val; i += step { + if i == val { + return true + } + } + return false +} + +func isValidMonthDay(val string, last int, ref time.Time) (valid bool, err error) { + day, loc := ref.Day(), ref.Location() + if val == "L" { + return day == last, nil + } + + pos := strings.Index(val, "W") + if pos < 1 { + return false, errors.New("invalid offset value: " + val) + } + + nval, err := strconv.Atoi(val[0:pos]) + if err != nil { + return false, err + } + + for _, i := range []int{0, -1, 1, -2, 2} { + incr := i + nval + if incr > 0 && incr <= last { + iref := time.Date(ref.Year(), ref.Month(), incr, ref.Hour(), ref.Minute(), ref.Second(), 0, loc) + week := int(iref.Weekday()) + + if week > 0 && week < 6 && iref.Month() == ref.Month() { + valid = day == iref.Day() + break + } + } + } + + return valid, nil +} + +func isValidWeekDay(val string, last int, ref time.Time) (bool, error) { + loc := ref.Location() + if pos := 
strings.Index(strings.ReplaceAll(val, "7L", "0L"), "L"); pos > 0 { + nval, err := strconv.Atoi(val[0:pos]) + if err != nil { + return false, err + } + + for i := 0; i < 7; i++ { + decr := last - i + dref := time.Date(ref.Year(), ref.Month(), decr, ref.Hour(), ref.Minute(), ref.Second(), ref.Nanosecond(), loc) + + if int(dref.Weekday()) == nval { + return ref.Day() == decr, nil + } + } + + return false, nil + } + + pos := strings.Index(val, "#") + parts := strings.Split(strings.ReplaceAll(val, "7#", "0#"), "#") + if pos < 1 || len(parts) < 2 { + return false, errors.New("invalid offset value: " + val) + } + + day, err := strconv.Atoi(parts[0]) + if err != nil { + return false, err + } + + nth, err := strconv.Atoi(parts[1]) + if err != nil { + return false, err + } + + if day < 0 || day > 7 || nth < 1 || nth > 5 || int(ref.Weekday()) != day { + return false, nil + } + + return ref.Day()/7 == nth-1, nil +} diff --git a/vendor/github.com/go-co-op/gocron/.gitignore b/vendor/github.com/go-co-op/gocron/.gitignore new file mode 100644 index 000000000000..f6409f90d104 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/.gitignore @@ -0,0 +1,19 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test +local_testing + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# IDE project files +.idea diff --git a/vendor/github.com/go-co-op/gocron/.golangci.yaml b/vendor/github.com/go-co-op/gocron/.golangci.yaml new file mode 100644 index 000000000000..7c86335f52ff --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/.golangci.yaml @@ -0,0 +1,50 @@ +run: + timeout: 2m + issues-exit-code: 1 + tests: true + +issues: + max-same-issues: 100 + exclude-rules: + - path: _test\.go + linters: + - bodyclose + - errcheck + - gosec + +linters: + enable: + - bodyclose + - errcheck + - gofmt + - gofumpt + - goimports + 
- gosec + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - typecheck + - unused + +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + # print lines of code with issue, default is true + print-issued-lines: true + # print linter name in the end of issue text, default is true + print-linter-name: true + # make issues output unique by line, default is true + uniq-by-line: true + # add a prefix to the output file references; default is no prefix + path-prefix: "" + # sorts results by: filepath, line and column + sort-results: true + +linters-settings: + golint: + min-confidence: 0.8 + +fix: true diff --git a/vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md b/vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..7d913b55b867 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone. And we mean everyone! 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and kind language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team initially on Slack to coordinate private communication. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/go-co-op/gocron/CONTRIBUTING.md b/vendor/github.com/go-co-op/gocron/CONTRIBUTING.md new file mode 100644 index 000000000000..b2d3be83f6fd --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# Contributing to gocron + +Thank you for coming to contribute to gocron! We welcome new ideas, PRs and general feedback. 
+ +## Reporting Bugs + +If you find a bug then please let the project know by opening an issue after doing the following: + +- Do a quick search of the existing issues to make sure the bug isn't already reported +- Try and make a minimal list of steps that can reliably reproduce the bug you are experiencing +- Collect as much information as you can to help identify what the issue is (project version, configuration files, etc) + +## Suggesting Enhancements + +If you have a use case that you don't see a way to support yet, we would welcome the feedback in an issue. Before opening the issue, please consider: + +- Is this a common use case? +- Is it simple to understand? + +You can help us out by doing the following before raising a new issue: + +- Check that the feature hasn't been requested already by searching existing issues +- Try and reduce your enhancement into a single, concise and deliverable request, rather than a general idea +- Explain your own use cases as the basis of the request + +## Adding Features + +Pull requests are always welcome. However, before going through the trouble of implementing a change it's worth creating a bug or feature request issue. +This allows us to discuss the changes and make sure they are a good fit for the project. + +Please always make sure a pull request has been: + +- Unit tested with `make test` +- Linted with `make lint` +- Vetted with `make vet` +- Formatted with `make fmt` or validated with `make check-fmt` + +## Writing Tests + +Tests should follow the [table driven test pattern](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go). See other tests in the code base for additional examples. 
diff --git a/vendor/github.com/go-co-op/gocron/LICENSE b/vendor/github.com/go-co-op/gocron/LICENSE new file mode 100644 index 000000000000..3357d57d7e03 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014, 辣椒面 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-co-op/gocron/Makefile b/vendor/github.com/go-co-op/gocron/Makefile new file mode 100644 index 000000000000..1e16aef6e3a2 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/Makefile @@ -0,0 +1,13 @@ +.PHONY: fmt check-fmt lint vet test + +GO_PKGS := $(shell go list -f {{.Dir}} ./...) + +fmt: + @go list -f {{.Dir}} ./... 
| xargs -I{} gofmt -w -s {} + +lint: + @grep "^func " example_test.go | sort -c + @golangci-lint run + +test: + @go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS) diff --git a/vendor/github.com/go-co-op/gocron/README.md b/vendor/github.com/go-co-op/gocron/README.md new file mode 100644 index 000000000000..59f49190694e --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/README.md @@ -0,0 +1,205 @@ +# gocron: A Golang Job Scheduling Package. + +[![CI State](https://github.com/go-co-op/gocron/actions/workflows/go_test.yml/badge.svg?branch=main&event=push)](https://github.com/go-co-op/gocron/actions) +![Go Report Card](https://goreportcard.com/badge/github.com/go-co-op/gocron) [![Go Doc](https://godoc.org/github.com/go-co-op/gocron?status.svg)](https://pkg.go.dev/github.com/go-co-op/gocron) + +gocron is a job scheduling package which lets you run Go functions at pre-determined intervals +using a simple, human-friendly syntax. + +gocron is a Golang scheduler implementation similar to the Ruby module +[clockwork](https://github.com/tomykaira/clockwork) and the Python job scheduling package [schedule](https://github.com/dbader/schedule). + +See also these two great articles that were used for design input: + +- [Rethinking Cron](http://adam.herokuapp.com/past/2010/4/13/rethinking_cron/) +- [Replace Cron with Clockwork](http://adam.herokuapp.com/past/2010/6/30/replace_cron_with_clockwork/) + +If you want to chat, you can find us at Slack! +[](https://gophers.slack.com/archives/CQ7T0T1FW) + +## Concepts + +- **Scheduler**: The scheduler tracks all the jobs assigned to it and makes sure they are passed to the executor when + ready to be run. The scheduler is able to manage overall aspects of job behavior like limiting how many jobs + are running at one time. 
+- **Job**: The job is simply aware of the task (go function) it's provided and is therefore only able to perform + actions related to that task like preventing itself from overruning a previous task that is taking a long time. +- **Executor**: The executor, as it's name suggests, is simply responsible for calling the task (go function) that + the job hands to it when sent by the scheduler. + +## Examples + +```golang +s := gocron.NewScheduler(time.UTC) + +// Every starts the job immediately and then runs at the +// specified interval +job, err := s.Every(5).Seconds().Do(func(){ ... }) +if err != nil { + // handle the error related to setting up the job +} + +// strings parse to duration +s.Every("5m").Do(func(){ ... }) + +s.Every(5).Days().Do(func(){ ... }) + +s.Every(1).Month(1, 2, 3).Do(func(){ ... }) + +// set time +s.Every(1).Day().At("10:30").Do(func(){ ... }) + +// set multiple times +s.Every(1).Day().At("10:30;08:00").Do(func(){ ... }) + +s.Every(1).Day().At("10:30").At("08:00").Do(func(){ ... }) + +// Schedule each last day of the month +s.Every(1).MonthLastDay().Do(func(){ ... }) + +// Or each last day of every other month +s.Every(2).MonthLastDay().Do(func(){ ... 
}) + +// cron expressions supported +s.Cron("*/1 * * * *").Do(task) // every minute + +// cron second-level expressions supported +s.CronWithSeconds("*/1 * * * * *").Do(task) // every second + +// you can start running the scheduler in two different ways: +// starts the scheduler asynchronously +s.StartAsync() +// starts the scheduler and blocks current execution path +s.StartBlocking() +``` + +For more examples, take a look in our [go docs](https://pkg.go.dev/github.com/go-co-op/gocron#pkg-examples) + +## Options + +| Interval | Supported schedule options | +| ------------ | ------------------------------------------------------------------- | +| sub-second | `StartAt()` | +| milliseconds | `StartAt()` | +| seconds | `StartAt()` | +| minutes | `StartAt()` | +| hours | `StartAt()` | +| days | `StartAt()`, `At()` | +| weeks | `StartAt()`, `At()`, `Weekday()` (and all week day named functions) | +| months | `StartAt()`, `At()` | + +There are several options available to restrict how jobs run: + +| Mode | Function | Behavior | +|---------------------|---------------------------|------------------------------------------------------------------------------------------------------| +| Default | | jobs are rescheduled at every interval | +| Job singleton | `SingletonMode()` | a long running job will not be rescheduled until the current run is completed | +| Scheduler limit | `SetMaxConcurrentJobs()` | set a collective maximum number of concurrent jobs running across the scheduler | +| Distributed locking | `WithDistributedLocker()` | prevents the same job from being run more than once when running multiple instances of the scheduler | + +## Distributed Locker Implementations + +- Redis: [redislock](https://github.com/go-co-op/gocron-redis-lock) `go get github.com/go-co-op/gocron-redis-lock` + +## Tags + +Jobs may have arbitrary tags added which can be useful when tracking many jobs. 
+The scheduler supports both enforcing tags to be unique and when not unique, +running all jobs with a given tag. + +```golang +s := gocron.NewScheduler(time.UTC) +s.TagsUnique() + +_, _ = s.Every(1).Week().Tag("foo").Do(task) +_, err := s.Every(1).Week().Tag("foo").Do(task) +// error!!! + +s := gocron.NewScheduler(time.UTC) + +s.Every(2).Day().Tag("tag").At("10:00").Do(task) +s.Every(1).Minute().Tag("tag").Do(task) +s.RunByTag("tag") +// both jobs will run +``` + +## FAQ + +- Q: I'm running multiple pods on a distributed environment. How can I make a job not run once per pod causing duplication? + - We recommend using your own lock solution within the jobs themselves (you could use [Redis](https://redis.io/topics/distlock), for example) + - A2: Use the scheduler option `WithDistributedLocker` and either use an implemented [backend](#distributed-locker-implementations) + or implement your own and contribute it back in a PR! + +- Q: I've removed my job from the scheduler, but how can I stop a long-running job that has already been triggered? + - A: We recommend using a means of canceling your job, e.g. a `context.WithCancel()`. 
+ - A2: You can listen to the job context Done channel to know when the job has been canceled + ```golang + task := func(in string, job gocron.Job) { + fmt.Printf("this job's last run: %s this job's next run: %s\n", job.LastRun(), job.NextRun()) + fmt.Printf("in argument is %s\n", in) + + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-job.Context().Done(): + fmt.Printf("function has been canceled, performing cleanup and exiting gracefully\n") + return + case <-ticker.C: + fmt.Printf("performing a hard job that takes a long time that I want to kill whenever I want\n") + } + } + } + + var err error + s := gocron.NewScheduler(time.UTC) + s.SingletonModeAll() + j, err := s.Every(1).Hour().Tag("myJob").DoWithJobDetails(task, "foo") + if err != nil { + log.Fatalln("error scheduling job", err) + } + + s.StartAsync() + + // Simulate some more work + time.Sleep(time.Second) + + // I want to stop the job, together with the underlying goroutine + fmt.Printf("now I want to kill the job\n") + err = s.RemoveByTag("myJob") + if err != nil { + log.Fatalln("error removing job by tag", err) + } + + // Wait a bit so that we can see that the job is exiting gracefully + time.Sleep(time.Second) + fmt.Printf("Job: %#v, Error: %#v", j, err) + ``` + +--- + +Looking to contribute? Try to follow these guidelines: + +- Use issues for everything +- For a small change, just send a PR! +- For bigger changes, please open an issue for discussion before sending a PR. +- PRs should have: tests, documentation and examples (if it makes sense) +- You can also contribute by: + - Reporting issues + - Suggesting new features or enhancements + - Improving/fixing documentation + +--- + +## Design + +![design-diagram](https://user-images.githubusercontent.com/19351306/110375142-2ba88680-8017-11eb-80c3-554cc746b165.png) + +[Jetbrains](https://www.jetbrains.com/?from=gocron) supports this project with GoLand licenses. 
We appreciate their support for free and open source software! + +## Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=go-co-op/gocron&type=Date)](https://star-history.com/#go-co-op/gocron&Date) + + diff --git a/vendor/github.com/go-co-op/gocron/SECURITY.md b/vendor/github.com/go-co-op/gocron/SECURITY.md new file mode 100644 index 000000000000..6b9864123ff8 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/SECURITY.md @@ -0,0 +1,15 @@ +# Security Policy + +## Supported Versions + +The current plan is to maintain version 1 as long as possible incorporating any necessary security patches. + +| Version | Supported | +| ------- | ------------------ | +| 1.x.x | :white_check_mark: | + +## Reporting a Vulnerability + +Vulnerabilities can be reported by [opening an issue](https://github.com/go-co-op/gocron/issues/new/choose) or reaching out on Slack: [](https://gophers.slack.com/archives/CQ7T0T1FW) + +We will do our best to addrerss any vulnerabilities in an expeditious manner. diff --git a/vendor/github.com/go-co-op/gocron/executor.go b/vendor/github.com/go-co-op/gocron/executor.go new file mode 100644 index 000000000000..76d2352adab2 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/executor.go @@ -0,0 +1,273 @@ +package gocron + +import ( + "context" + "sync" + "time" + + "go.uber.org/atomic" +) + +const ( + // RescheduleMode - the default is that if a limit on maximum + // concurrent jobs is set and the limit is reached, a job will + // skip it's run and try again on the next occurrence in the schedule + RescheduleMode limitMode = iota + + // WaitMode - if a limit on maximum concurrent jobs is set + // and the limit is reached, a job will wait to try and run + // until a spot in the limit is freed up. + // + // Note: this mode can produce unpredictable results as + // job execution order isn't guaranteed. 
For example, a job that + // executes frequently may pile up in the wait queue and be executed + // many times back to back when the queue opens. + // + // Warning: do not use this mode if your jobs will continue to stack + // up beyond the ability of the limit workers to keep up. An example of + // what NOT to do: + // + // s.Every("1s").Do(func() { + // // this will result in an ever-growing number of goroutines + // // blocked trying to send to the buffered channel + // time.Sleep(10 * time.Minute) + // }) + + WaitMode +) + +type executor struct { + jobFunctions chan jobFunction // the chan upon which the jobFunctions are passed in from the scheduler + ctx context.Context // used to tell the executor to stop + cancel context.CancelFunc // used to tell the executor to stop + wg *sync.WaitGroup // used by the scheduler to wait for the executor to stop + jobsWg *sync.WaitGroup // used by the executor to wait for all jobs to finish + singletonWgs *sync.Map // used by the executor to wait for the singleton runners to complete + skipExecution *atomic.Bool // used to pause the execution of jobs + + limitMode limitMode // when SetMaxConcurrentJobs() is set upon the scheduler + limitModeMaxRunningJobs int // stores the maximum number of concurrently running jobs + limitModeFuncsRunning *atomic.Int64 // tracks the count of limited mode funcs running + limitModeFuncWg *sync.WaitGroup // allow the executor to wait for limit mode functions to wrap up + limitModeQueue chan jobFunction // pass job functions to the limit mode workers + limitModeQueueMu *sync.Mutex // for protecting the limitModeQueue + limitModeRunningJobs *atomic.Int64 // tracks the count of running jobs to check against the max + stopped *atomic.Bool // allow workers to drain the buffered limitModeQueue + + distributedLocker Locker // support running jobs across multiple instances +} + +func newExecutor() executor { + e := executor{ + jobFunctions: make(chan jobFunction, 1), + singletonWgs: &sync.Map{}, + 
limitModeFuncsRunning: atomic.NewInt64(0), + limitModeFuncWg: &sync.WaitGroup{}, + limitModeRunningJobs: atomic.NewInt64(0), + limitModeQueueMu: &sync.Mutex{}, + } + return e +} + +func runJob(f jobFunction) { + f.runStartCount.Add(1) + f.isRunning.Store(true) + callJobFunc(f.eventListeners.onBeforeJobExecution) + _ = callJobFuncWithParams(f.eventListeners.beforeJobRuns, []interface{}{f.getName()}) + err := callJobFuncWithParams(f.function, f.parameters) + if err != nil { + _ = callJobFuncWithParams(f.eventListeners.onError, []interface{}{f.getName(), err}) + } else { + _ = callJobFuncWithParams(f.eventListeners.noError, []interface{}{f.getName()}) + } + _ = callJobFuncWithParams(f.eventListeners.afterJobRuns, []interface{}{f.getName()}) + callJobFunc(f.eventListeners.onAfterJobExecution) + f.isRunning.Store(false) + f.runFinishCount.Add(1) +} + +func (jf *jobFunction) singletonRunner() { + jf.singletonRunnerOn.Store(true) + jf.singletonWgMu.Lock() + jf.singletonWg.Add(1) + jf.singletonWgMu.Unlock() + for { + select { + case <-jf.ctx.Done(): + jf.singletonWg.Done() + jf.singletonRunnerOn.Store(false) + jf.singletonQueueMu.Lock() + jf.singletonQueue = make(chan struct{}, 1000) + jf.singletonQueueMu.Unlock() + jf.stopped.Store(false) + return + case <-jf.singletonQueue: + if !jf.stopped.Load() { + runJob(*jf) + } + } + } +} + +func (e *executor) limitModeRunner() { + for { + select { + case <-e.ctx.Done(): + e.limitModeFuncsRunning.Inc() + e.limitModeFuncWg.Done() + return + case jf := <-e.limitModeQueue: + if !e.stopped.Load() { + e.runJob(jf) + } + } + } +} + +func (e *executor) start() { + e.wg = &sync.WaitGroup{} + e.wg.Add(1) + + stopCtx, cancel := context.WithCancel(context.Background()) + e.ctx = stopCtx + e.cancel = cancel + + e.jobsWg = &sync.WaitGroup{} + + e.stopped = atomic.NewBool(false) + e.skipExecution = atomic.NewBool(false) + + e.limitModeQueueMu.Lock() + e.limitModeQueue = make(chan jobFunction, 1000) + e.limitModeQueueMu.Unlock() + go e.run() +} + 
+func (e *executor) runJob(f jobFunction) { + switch f.runConfig.mode { + case defaultMode: + lockKey := f.jobName + if lockKey == "" { + lockKey = f.funcName + } + if e.distributedLocker != nil { + l, err := e.distributedLocker.Lock(f.ctx, lockKey) + if err != nil || l == nil { + return + } + defer func() { + durationToNextRun := time.Until(f.jobFuncNextRun) + if durationToNextRun > time.Second*5 { + durationToNextRun = time.Second * 5 + } + if durationToNextRun > time.Millisecond*100 { + timer := time.NewTimer(time.Duration(float64(durationToNextRun) * 0.9)) + defer timer.Stop() + + select { + case <-e.ctx.Done(): + case <-timer.C: + } + } + _ = l.Unlock(f.ctx) + }() + } + runJob(f) + case singletonMode: + e.singletonWgs.Store(f.singletonWg, f.singletonWgMu) + + if !f.singletonRunnerOn.Load() { + go f.singletonRunner() + } + f.singletonQueueMu.Lock() + f.singletonQueue <- struct{}{} + f.singletonQueueMu.Unlock() + } +} + +func (e *executor) run() { + for { + select { + case f := <-e.jobFunctions: + if e.stopped.Load() || e.skipExecution.Load() { + continue + } + + if e.limitModeMaxRunningJobs > 0 { + countRunning := e.limitModeFuncsRunning.Load() + if countRunning < int64(e.limitModeMaxRunningJobs) { + diff := int64(e.limitModeMaxRunningJobs) - countRunning + for i := int64(0); i < diff; i++ { + e.limitModeFuncWg.Add(1) + go e.limitModeRunner() + e.limitModeFuncsRunning.Inc() + } + } + } + + e.jobsWg.Add(1) + go func() { + defer e.jobsWg.Done() + + panicHandlerMutex.RLock() + defer panicHandlerMutex.RUnlock() + + if panicHandler != nil { + defer func() { + if r := recover(); r != nil { + panicHandler(f.funcName, r) + } + }() + } + + if e.limitModeMaxRunningJobs > 0 { + switch e.limitMode { + case RescheduleMode: + if e.limitModeRunningJobs.Load() < int64(e.limitModeMaxRunningJobs) { + select { + case e.limitModeQueue <- f: + case <-e.ctx.Done(): + } + } + case WaitMode: + select { + case e.limitModeQueue <- f: + case <-e.ctx.Done(): + } + } + return + } + + 
e.runJob(f) + }() + case <-e.ctx.Done(): + e.jobsWg.Wait() + e.wg.Done() + return + } + } +} + +func (e *executor) stop() { + e.stopped.Store(true) + e.cancel() + e.wg.Wait() + if e.singletonWgs != nil { + e.singletonWgs.Range(func(key, value interface{}) bool { + wg, wgOk := key.(*sync.WaitGroup) + mu, muOk := value.(*sync.Mutex) + if wgOk && muOk { + mu.Lock() + wg.Wait() + mu.Unlock() + } + return true + }) + } + if e.limitModeMaxRunningJobs > 0 { + e.limitModeFuncWg.Wait() + e.limitModeQueueMu.Lock() + e.limitModeQueue = nil + e.limitModeQueueMu.Unlock() + } +} diff --git a/vendor/github.com/go-co-op/gocron/gocron.go b/vendor/github.com/go-co-op/gocron/gocron.go new file mode 100644 index 000000000000..8ca2499268ed --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/gocron.go @@ -0,0 +1,146 @@ +// Package gocron : A Golang Job Scheduling Package. +// +// An in-process scheduler for periodic jobs that uses the builder pattern +// for configuration. gocron lets you run Golang functions periodically +// at pre-determined intervals using a simple, human-friendly syntax. +package gocron + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "runtime" + "sync" + "time" +) + +// PanicHandlerFunc represents a type that can be set to handle panics occurring +// during job execution. +type PanicHandlerFunc func(jobName string, recoverData interface{}) + +// The global panic handler +var ( + panicHandler PanicHandlerFunc + panicHandlerMutex = sync.RWMutex{} +) + +// SetPanicHandler sets the global panicHandler to the given function. +// Leaving it nil or setting it to nil disables automatic panic handling. +// If the panicHandler is not nil, any panic that occurs during executing a job will be recovered +// and the panicHandlerFunc will be called with the job's funcName and the recover data. 
+func SetPanicHandler(handler PanicHandlerFunc) { + panicHandlerMutex.Lock() + defer panicHandlerMutex.Unlock() + panicHandler = handler +} + +// Error declarations for gocron related errors +var ( + ErrNotAFunction = errors.New("gocron: only functions can be scheduled into the job queue") + ErrNotScheduledWeekday = errors.New("gocron: job not scheduled weekly on a weekday") + ErrJobNotFoundWithTag = errors.New("gocron: no jobs found with given tag") + ErrUnsupportedTimeFormat = errors.New("gocron: the given time format is not supported") + ErrInvalidInterval = errors.New("gocron: .Every() interval must be greater than 0") + ErrInvalidIntervalType = errors.New("gocron: .Every() interval must be int, time.Duration, or string") + ErrInvalidIntervalUnitsSelection = errors.New("gocron: .Every(time.Duration) and .Cron() cannot be used with units (e.g. .Seconds())") + ErrInvalidFunctionParameters = errors.New("gocron: length of function parameters must match job function parameters") + + ErrAtTimeNotSupported = errors.New("gocron: the At() method is not supported for this time unit") + ErrWeekdayNotSupported = errors.New("gocron: weekday is not supported for time unit") + ErrInvalidDayOfMonthEntry = errors.New("gocron: only days 1 through 28 are allowed for monthly schedules") + ErrTagsUnique = func(tag string) error { return fmt.Errorf("gocron: a non-unique tag was set on the job: %s", tag) } + ErrWrongParams = errors.New("gocron: wrong list of params") + ErrDoWithJobDetails = errors.New("gocron: DoWithJobDetails expects a function whose last parameter is a gocron.Job") + ErrUpdateCalledWithoutJob = errors.New("gocron: a call to Scheduler.Update() requires a call to Scheduler.Job() first") + ErrCronParseFailure = errors.New("gocron: cron expression failed to be parsed") + ErrInvalidDaysOfMonthDuplicateValue = errors.New("gocron: duplicate days of month is not allowed in Month() and Months() methods") +) + +func wrapOrError(toWrap error, err error) error { + var 
returnErr error + if toWrap != nil && !errors.Is(err, toWrap) { + returnErr = fmt.Errorf("%s: %w", err, toWrap) + } else { + returnErr = err + } + return returnErr +} + +// regex patterns for supported time formats +var ( + timeWithSeconds = regexp.MustCompile(`(?m)^\d{1,2}:\d\d:\d\d$`) + timeWithoutSeconds = regexp.MustCompile(`(?m)^\d{1,2}:\d\d$`) +) + +type schedulingUnit int + +const ( + // default unit is seconds + milliseconds schedulingUnit = iota + seconds + minutes + hours + days + weeks + months + duration + crontab +) + +func callJobFunc(jobFunc interface{}) { + if jobFunc == nil { + return + } + f := reflect.ValueOf(jobFunc) + if !f.IsZero() { + f.Call([]reflect.Value{}) + } +} + +func callJobFuncWithParams(jobFunc interface{}, params []interface{}) error { + if jobFunc == nil { + return nil + } + f := reflect.ValueOf(jobFunc) + if f.IsZero() { + return nil + } + if len(params) != f.Type().NumIn() { + return nil + } + in := make([]reflect.Value, len(params)) + for k, param := range params { + in[k] = reflect.ValueOf(param) + } + vals := f.Call(in) + for _, val := range vals { + i := val.Interface() + if err, ok := i.(error); ok { + return err + } + } + return nil +} + +func getFunctionName(fn interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() +} + +func parseTime(t string) (hour, min, sec int, err error) { + var timeLayout string + switch { + case timeWithSeconds.Match([]byte(t)): + timeLayout = "15:04:05" + case timeWithoutSeconds.Match([]byte(t)): + timeLayout = "15:04" + default: + return 0, 0, 0, ErrUnsupportedTimeFormat + } + + parsedTime, err := time.Parse(timeLayout, t) + if err != nil { + return 0, 0, 0, ErrUnsupportedTimeFormat + } + return parsedTime.Hour(), parsedTime.Minute(), parsedTime.Second(), nil +} diff --git a/vendor/github.com/go-co-op/gocron/job.go b/vendor/github.com/go-co-op/gocron/job.go new file mode 100644 index 000000000000..4744bac04cd2 --- /dev/null +++ 
b/vendor/github.com/go-co-op/gocron/job.go @@ -0,0 +1,602 @@ +package gocron + +import ( + "context" + "fmt" + "math/rand" + "sort" + "sync" + "time" + + "github.com/robfig/cron/v3" + "go.uber.org/atomic" +) + +// Job struct stores the information necessary to run a Job +type Job struct { + mu *jobMutex + jobFunction + interval int // interval * unit between runs + random // details for randomness + duration time.Duration // time duration between runs + unit schedulingUnit // time units, e.g. 'minutes', 'hours'... + startsImmediately bool // if the Job should run upon scheduler start + atTimes []time.Duration // optional time(s) at which this Job runs when interval is day + startAtTime time.Time // optional time at which the Job starts + error error // error related to Job + + scheduledWeekdays []time.Weekday // Specific days of the week to start on + daysOfTheMonth []int // Specific days of the month to run the job + tags []string // allow the user to tag Jobs with certain labels + timer *time.Timer // handles running tasks at specific time + cronSchedule cron.Schedule // stores the schedule when a task uses cron + runWithDetails bool // when true the job is passed as the last arg of the jobFunc +} + +type jobRunTimes struct { + jobRunTimesMu *sync.Mutex + previousRun time.Time // datetime of the run before last run + lastRun time.Time // datetime of last run + nextRun time.Time // datetime of next run +} + +type random struct { + rand *rand.Rand + randomizeInterval bool // whether the interval is random + randomIntervalRange [2]int // random interval range +} + +type jobFunction struct { + *jobRunTimes // tracking all the markers for job run times + eventListeners // additional functions to allow run 'em during job performing + function interface{} // task's function + parameters []interface{} // task's function parameters + parametersLen int // length of the passed parameters + jobName string // key of the distributed lock + funcName string // the name of the 
function - e.g. main.func1 + runConfig runConfig // configuration for how many times to run the job + singletonQueueMu *sync.Mutex // mutex for singletonQueue + singletonQueue chan struct{} // queues jobs for the singleton runner to handle + singletonRunnerOn *atomic.Bool // whether the runner function for singleton is running + ctx context.Context // for cancellation + cancel context.CancelFunc // for cancellation + isRunning *atomic.Bool // whether the job func is currently being run + runStartCount *atomic.Int64 // number of times the job was started + runFinishCount *atomic.Int64 // number of times the job was finished + singletonWg *sync.WaitGroup // used by singleton runner + singletonWgMu *sync.Mutex // use to protect the singletonWg + stopped *atomic.Bool // tracks whether the job is currently stopped + jobFuncNextRun time.Time // the next time the job is scheduled to run +} + +type eventListeners struct { + onAfterJobExecution interface{} // deprecated + onBeforeJobExecution interface{} // deprecated + beforeJobRuns func(jobName string) // called before the job executes + afterJobRuns func(jobName string) // called after the job executes + onError func(jobName string, err error) // called when the job returns an error + noError func(jobName string) // called when no error is returned +} + +type jobMutex struct { + sync.RWMutex +} + +func (jf *jobFunction) copy() jobFunction { + cp := jobFunction{ + jobRunTimes: jf.jobRunTimes, + eventListeners: jf.eventListeners, + function: jf.function, + parameters: nil, + parametersLen: jf.parametersLen, + funcName: jf.funcName, + jobName: jf.jobName, + runConfig: jf.runConfig, + singletonQueue: jf.singletonQueue, + singletonQueueMu: jf.singletonQueueMu, + ctx: jf.ctx, + cancel: jf.cancel, + isRunning: jf.isRunning, + runStartCount: jf.runStartCount, + runFinishCount: jf.runFinishCount, + singletonWg: jf.singletonWg, + singletonWgMu: jf.singletonWgMu, + singletonRunnerOn: jf.singletonRunnerOn, + stopped: jf.stopped, + 
jobFuncNextRun: jf.jobFuncNextRun, + } + cp.parameters = append(cp.parameters, jf.parameters...) + return cp +} + +func (jf *jobFunction) getName() string { + if jf.jobName != "" { + return jf.jobName + } + return jf.funcName +} + +type runConfig struct { + finiteRuns bool + maxRuns int + mode mode +} + +// mode is the Job's running mode +type mode int8 + +const ( + // defaultMode disable any mode + defaultMode mode = iota + + // singletonMode switch to single job mode + singletonMode +) + +// newJob creates a new Job with the provided interval +func newJob(interval int, startImmediately bool, singletonMode bool) *Job { + ctx, cancel := context.WithCancel(context.Background()) + job := &Job{ + mu: &jobMutex{}, + interval: interval, + unit: seconds, + jobFunction: jobFunction{ + jobRunTimes: &jobRunTimes{ + jobRunTimesMu: &sync.Mutex{}, + lastRun: time.Time{}, + nextRun: time.Time{}, + }, + ctx: ctx, + cancel: cancel, + isRunning: atomic.NewBool(false), + runStartCount: atomic.NewInt64(0), + runFinishCount: atomic.NewInt64(0), + singletonRunnerOn: atomic.NewBool(false), + stopped: atomic.NewBool(false), + }, + tags: []string{}, + startsImmediately: startImmediately, + } + if singletonMode { + job.SingletonMode() + } + return job +} + +// Name sets the name of the current job. +// +// If the scheduler is running using WithDistributedLocker(), +// the job name is used as the distributed lock key. 
+func (j *Job) Name(name string) { + j.mu.Lock() + defer j.mu.Unlock() + j.jobName = name +} + +func (j *Job) setRandomInterval(a, b int) { + j.random.rand = rand.New(rand.NewSource(time.Now().UnixNano())) // nolint + + j.random.randomizeInterval = true + if a < b { + j.random.randomIntervalRange[0] = a + j.random.randomIntervalRange[1] = b + 1 + } else { + j.random.randomIntervalRange[0] = b + j.random.randomIntervalRange[1] = a + 1 + } +} + +func (j *Job) getRandomInterval() int { + randNum := j.rand.Intn(j.randomIntervalRange[1] - j.randomIntervalRange[0]) + return j.randomIntervalRange[0] + randNum +} + +func (j *Job) getInterval() int { + if j.randomizeInterval { + return j.getRandomInterval() + } + return j.interval +} + +func (j *Job) neverRan() bool { + jobLastRun := j.LastRun() + return jobLastRun.IsZero() +} + +func (j *Job) getStartsImmediately() bool { + return j.startsImmediately +} + +func (j *Job) setStartsImmediately(b bool) { + j.startsImmediately = b +} + +func (j *Job) setTimer(t *time.Timer) { + j.mu.Lock() + defer j.mu.Unlock() + j.timer = t +} + +func (j *Job) getFirstAtTime() time.Duration { + var t time.Duration + if len(j.atTimes) > 0 { + t = j.atTimes[0] + } + + return t +} + +func (j *Job) getAtTime(lastRun time.Time) time.Duration { + var r time.Duration + if len(j.atTimes) == 0 { + return r + } + + if len(j.atTimes) == 1 { + return j.atTimes[0] + } + + if lastRun.IsZero() { + r = j.atTimes[0] + } else { + for _, d := range j.atTimes { + nt := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), 0, 0, 0, 0, lastRun.Location()).Add(d) + if nt.After(lastRun) { + r = d + break + } + } + } + + return r +} + +func (j *Job) addAtTime(t time.Duration) { + if len(j.atTimes) == 0 { + j.atTimes = append(j.atTimes, t) + return + } + exist := false + index := sort.Search(len(j.atTimes), func(i int) bool { + atTime := j.atTimes[i] + b := atTime >= t + if b { + exist = atTime == t + } + return b + }) + + // ignore if present + if exist { + return 
+ } + + j.atTimes = append(j.atTimes, time.Duration(0)) + copy(j.atTimes[index+1:], j.atTimes[index:]) + j.atTimes[index] = t +} + +func (j *Job) getStartAtTime() time.Time { + j.mu.RLock() + defer j.mu.RUnlock() + return j.startAtTime +} + +func (j *Job) setStartAtTime(t time.Time) { + j.mu.Lock() + defer j.mu.Unlock() + j.startAtTime = t +} + +func (j *Job) getUnit() schedulingUnit { + j.mu.RLock() + defer j.mu.RUnlock() + return j.unit +} + +func (j *Job) setUnit(t schedulingUnit) { + j.mu.Lock() + defer j.mu.Unlock() + j.unit = t +} + +func (j *Job) getDuration() time.Duration { + j.mu.RLock() + defer j.mu.RUnlock() + return j.duration +} + +func (j *Job) setDuration(t time.Duration) { + j.mu.Lock() + defer j.mu.Unlock() + j.duration = t +} + +// hasTags returns true if all tags are matched on this Job +func (j *Job) hasTags(tags ...string) bool { + // Build map of all Job tags for easy comparison + jobTags := map[string]int{} + for _, tag := range j.tags { + jobTags[tag] = 0 + } + + // Loop through required tags and if one doesn't exist, return false + for _, tag := range tags { + _, ok := jobTags[tag] + if !ok { + return false + } + } + return true +} + +// Error returns an error if one occurred while creating the Job. +// If multiple errors occurred, they will be wrapped and can be +// checked using the standard unwrap options. +func (j *Job) Error() error { + return j.error +} + +// Context returns the job's context. The context controls cancellation. +func (j *Job) Context() context.Context { + return j.ctx +} + +// Tag allows you to add arbitrary labels to a Job that do not +// impact the functionality of the Job +func (j *Job) Tag(tags ...string) { + j.tags = append(j.tags, tags...) 
+} + +// Untag removes a tag from a Job +func (j *Job) Untag(t string) { + var newTags []string + for _, tag := range j.tags { + if t != tag { + newTags = append(newTags, tag) + } + } + + j.tags = newTags +} + +// Tags returns the tags attached to the Job +func (j *Job) Tags() []string { + return j.tags +} + +// EventListener functions utilize the job's name and are triggered +// by or in the condition that the name suggests +type EventListener func(j *Job) + +// BeforeJobRuns is called before the job is run +func BeforeJobRuns(eventListenerFunc func(jobName string)) EventListener { + return func(j *Job) { + j.mu.Lock() + defer j.mu.Unlock() + j.eventListeners.beforeJobRuns = eventListenerFunc + } +} + +// AfterJobRuns is called after the job is run +// This is called even when an error is returned +func AfterJobRuns(eventListenerFunc func(jobName string)) EventListener { + return func(j *Job) { + j.mu.Lock() + defer j.mu.Unlock() + j.eventListeners.afterJobRuns = eventListenerFunc + } +} + +// WhenJobReturnsError is called when the job returns an error +func WhenJobReturnsError(eventListenerFunc func(jobName string, err error)) EventListener { + return func(j *Job) { + j.mu.Lock() + defer j.mu.Unlock() + j.eventListeners.onError = eventListenerFunc + } +} + +// WhenJobReturnsNoError is called when the job does not return an error +// the function must accept a single parameter, which is an error +func WhenJobReturnsNoError(eventListenerFunc func(jobName string)) EventListener { + return func(j *Job) { + j.mu.Lock() + defer j.mu.Unlock() + j.eventListeners.noError = eventListenerFunc + } +} + +// RegisterEventListeners accepts EventListeners and registers them for the job +// The event listeners are then called at the times described by each listener. 
+func (j *Job) RegisterEventListeners(eventListeners ...EventListener) { + for _, el := range eventListeners { + el(j) + } +} + +// Deprecated: SetEventListeners accepts two functions that will be called, one before and one after the job is run +func (j *Job) SetEventListeners(onBeforeJobExecution interface{}, onAfterJobExecution interface{}) { + j.eventListeners = eventListeners{ + onBeforeJobExecution: onBeforeJobExecution, + onAfterJobExecution: onAfterJobExecution, + } +} + +// ScheduledTime returns the time of the Job's next scheduled run +func (j *Job) ScheduledTime() time.Time { + j.mu.RLock() + defer j.mu.RUnlock() + return j.nextRun +} + +// ScheduledAtTime returns the specific time of day the Job will run at. +// If multiple times are set, the earliest time will be returned. +func (j *Job) ScheduledAtTime() string { + if len(j.atTimes) == 0 { + return "00:00" + } + + return fmt.Sprintf("%02d:%02d", j.getFirstAtTime()/time.Hour, (j.getFirstAtTime()%time.Hour)/time.Minute) +} + +// ScheduledAtTimes returns the specific times of day the Job will run at +func (j *Job) ScheduledAtTimes() []string { + r := make([]string, len(j.atTimes)) + for i, t := range j.atTimes { + r[i] = fmt.Sprintf("%02d:%02d", t/time.Hour, (t%time.Hour)/time.Minute) + } + + return r +} + +// Weekday returns which day of the week the Job will run on and +// will return an error if the Job is not scheduled weekly +func (j *Job) Weekday() (time.Weekday, error) { + if len(j.scheduledWeekdays) == 0 { + return time.Sunday, ErrNotScheduledWeekday + } + return j.scheduledWeekdays[0], nil +} + +// Weekdays returns a slice of time.Weekday that the Job will run in a week and +// will return an error if the Job is not scheduled weekly +func (j *Job) Weekdays() []time.Weekday { + // appending on j.scheduledWeekdays may cause a side effect + if len(j.scheduledWeekdays) == 0 { + return []time.Weekday{time.Sunday} + } + + return j.scheduledWeekdays +} + +// LimitRunsTo limits the number of executions 
of this job to n. +// Upon reaching the limit, the job is removed from the scheduler. +// +// Note: If a job is added to a running scheduler and this method is then used +// you may see the job run more than the set limit as job is scheduled immediately +// by default upon being added to the scheduler. It is recommended to use the +// LimitRunsTo() func on the scheduler chain when scheduling the job. +// For example: scheduler.LimitRunsTo(1).Do() +func (j *Job) LimitRunsTo(n int) { + j.mu.Lock() + defer j.mu.Unlock() + j.runConfig.finiteRuns = true + j.runConfig.maxRuns = n +} + +// SingletonMode prevents a new job from starting if the prior job has not yet +// completed it's run +// Note: If a job is added to a running scheduler and this method is then used +// you may see the job run overrun itself as job is scheduled immediately +// by default upon being added to the scheduler. It is recommended to use the +// SingletonMode() func on the scheduler chain when scheduling the job. +func (j *Job) SingletonMode() { + j.mu.Lock() + defer j.mu.Unlock() + j.runConfig.mode = singletonMode + + j.jobFunction.singletonWgMu = &sync.Mutex{} + j.jobFunction.singletonWgMu.Lock() + j.jobFunction.singletonWg = &sync.WaitGroup{} + j.jobFunction.singletonWgMu.Unlock() + + j.jobFunction.singletonQueueMu = &sync.Mutex{} + j.jobFunction.singletonQueueMu.Lock() + j.jobFunction.singletonQueue = make(chan struct{}, 100) + j.jobFunction.singletonQueueMu.Unlock() +} + +// shouldRun evaluates if this job should run again +// based on the runConfig +func (j *Job) shouldRun() bool { + j.mu.RLock() + defer j.mu.RUnlock() + return !j.runConfig.finiteRuns || j.runStartCount.Load() < int64(j.runConfig.maxRuns) +} + +// LastRun returns the time the job was run last +func (j *Job) LastRun() time.Time { + j.jobRunTimesMu.Lock() + defer j.jobRunTimesMu.Unlock() + return j.lastRun +} + +func (j *Job) setLastRun(t time.Time) { + j.previousRun = j.lastRun + j.lastRun = t +} + +// NextRun returns the 
time the job will run next +func (j *Job) NextRun() time.Time { + j.jobRunTimesMu.Lock() + defer j.jobRunTimesMu.Unlock() + return j.nextRun +} + +func (j *Job) setNextRun(t time.Time) { + j.jobRunTimesMu.Lock() + defer j.jobRunTimesMu.Unlock() + j.nextRun = t + j.jobFunction.jobFuncNextRun = t +} + +// PreviousRun returns the job run time previous to LastRun +func (j *Job) PreviousRun() time.Time { + j.jobRunTimesMu.Lock() + defer j.jobRunTimesMu.Unlock() + return j.previousRun +} + +// RunCount returns the number of times the job has been started +func (j *Job) RunCount() int { + j.mu.Lock() + defer j.mu.Unlock() + return int(j.runStartCount.Load()) +} + +// FinishedRunCount returns the number of times the job has finished running +func (j *Job) FinishedRunCount() int { + j.mu.Lock() + defer j.mu.Unlock() + return int(j.runFinishCount.Load()) +} + +func (j *Job) stop() { + j.mu.Lock() + defer j.mu.Unlock() + if j.timer != nil { + j.timer.Stop() + } + if j.cancel != nil { + j.cancel() + j.ctx, j.cancel = context.WithCancel(context.Background()) + } + j.stopped.Store(true) +} + +// IsRunning reports whether any instances of the job function are currently running +func (j *Job) IsRunning() bool { + return j.isRunning.Load() +} + +// you must Lock the job before calling copy +func (j *Job) copy() Job { + return Job{ + mu: &jobMutex{}, + jobFunction: j.jobFunction, + interval: j.interval, + duration: j.duration, + unit: j.unit, + startsImmediately: j.startsImmediately, + atTimes: j.atTimes, + startAtTime: j.startAtTime, + error: j.error, + scheduledWeekdays: j.scheduledWeekdays, + daysOfTheMonth: j.daysOfTheMonth, + tags: j.tags, + timer: j.timer, + cronSchedule: j.cronSchedule, + runWithDetails: j.runWithDetails, + } +} diff --git a/vendor/github.com/go-co-op/gocron/locker.go b/vendor/github.com/go-co-op/gocron/locker.go new file mode 100644 index 000000000000..dc713f9b3ce4 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/locker.go @@ -0,0 +1,23 @@ +package 
gocron + +import ( + "context" + "errors" +) + +var ( + ErrFailedToConnectToRedis = errors.New("gocron: failed to connect to redis") + ErrFailedToObtainLock = errors.New("gocron: failed to obtain lock") + ErrFailedToReleaseLock = errors.New("gocron: failed to release lock") +) + +// Locker represents the required interface to lock jobs when running multiple schedulers. +type Locker interface { + // Lock if an error is returned by lock, the job will not be scheduled. + Lock(ctx context.Context, key string) (Lock, error) +} + +// Lock represents an obtained lock +type Lock interface { + Unlock(ctx context.Context) error +} diff --git a/vendor/github.com/go-co-op/gocron/scheduler.go b/vendor/github.com/go-co-op/gocron/scheduler.go new file mode 100644 index 000000000000..9d7f6147be88 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/scheduler.go @@ -0,0 +1,1447 @@ +package gocron + +import ( + "context" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "time" + + "github.com/robfig/cron/v3" + "go.uber.org/atomic" +) + +type limitMode int8 + +// Scheduler struct stores a list of Jobs and the location of time used by the Scheduler, +// and implements the sort. 
any for sorting Jobs, by the time of jobFuncNextRun +type Scheduler struct { + jobsMutex sync.RWMutex + jobs []*Job + + locationMutex sync.RWMutex + location *time.Location + running *atomic.Bool // represents if the scheduler is running at the moment or not + + time TimeWrapper // wrapper around time.Time + timer func(d time.Duration, f func()) *time.Timer + executor *executor // executes jobs passed via chan + + tags sync.Map // for storing tags when unique tags is set + + tagsUnique bool // defines whether tags should be unique + updateJob bool // so the scheduler knows to create a new job or update the current + waitForInterval bool // defaults jobs to waiting for first interval to start + singletonMode bool // defaults all jobs to use SingletonMode() + + startBlockingStopChanMutex sync.Mutex + startBlockingStopChan chan struct{} // stops the scheduler + + // tracks whether we're in a chain of scheduling methods for a job + // a chain is started with any of the scheduler methods that operate + // upon a job and are ended with one of [ Do(), Update() ] - note that + // Update() calls Do(), so really they all end with Do(). + // This allows the caller to begin with any job related scheduler method + // and only with one of [ Every(), EveryRandom(), Cron(), CronWithSeconds(), MonthFirstWeekday() ] + inScheduleChain bool +} + +// days in a week +const allWeekDays = 7 + +// NewScheduler creates a new Scheduler +func NewScheduler(loc *time.Location) *Scheduler { + executor := newExecutor() + + return &Scheduler{ + jobs: make([]*Job, 0), + location: loc, + running: atomic.NewBool(false), + time: &trueTime{}, + executor: &executor, + tagsUnique: false, + timer: afterFunc, + } +} + +// SetMaxConcurrentJobs limits how many jobs can be running at the same time. +// This is useful when running resource intensive jobs and a precise start time is not critical. +// +// Note: WaitMode and RescheduleMode provide details on usage and potential risks. 
+func (s *Scheduler) SetMaxConcurrentJobs(n int, mode limitMode) { + s.executor.limitModeMaxRunningJobs = n + s.executor.limitMode = mode +} + +// StartBlocking starts all jobs and blocks the current thread. +// This blocking method can be stopped with Stop() from a separate goroutine. +func (s *Scheduler) StartBlocking() { + s.StartAsync() + s.startBlockingStopChanMutex.Lock() + s.startBlockingStopChan = make(chan struct{}, 1) + s.startBlockingStopChanMutex.Unlock() + + <-s.startBlockingStopChan + + s.startBlockingStopChanMutex.Lock() + s.startBlockingStopChan = nil + s.startBlockingStopChanMutex.Unlock() +} + +// StartAsync starts all jobs without blocking the current thread +func (s *Scheduler) StartAsync() { + if !s.IsRunning() { + s.start() + } +} + +// start starts the scheduler, scheduling and running jobs +func (s *Scheduler) start() { + s.executor.start() + s.setRunning(true) + s.runJobs(s.Jobs()) +} + +func (s *Scheduler) runJobs(jobs []*Job) { + for _, job := range jobs { + ctx, cancel := context.WithCancel(context.Background()) + job.mu.Lock() + job.ctx = ctx + job.cancel = cancel + job.mu.Unlock() + s.runContinuous(job) + } +} + +func (s *Scheduler) setRunning(b bool) { + s.running.Store(b) +} + +// IsRunning returns true if the scheduler is running +func (s *Scheduler) IsRunning() bool { + return s.running.Load() +} + +// Jobs returns the list of Jobs from the Scheduler +func (s *Scheduler) Jobs() []*Job { + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + return s.jobs +} + +// Name sets the name of the current job. +// +// If the scheduler is running using WithDistributedLocker(), the job name is used +// as the distributed lock key. If the job name is not set, the function name is used as the distributed lock key. 
+func (s *Scheduler) Name(name string) *Scheduler { + job := s.getCurrentJob() + job.jobName = name + return s +} + +func (s *Scheduler) setJobs(jobs []*Job) { + s.jobsMutex.Lock() + defer s.jobsMutex.Unlock() + s.jobs = jobs +} + +// Len returns the number of Jobs in the Scheduler - implemented for sort +func (s *Scheduler) Len() int { + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + return len(s.jobs) +} + +// Swap places each job into the other job's position given +// the provided job indexes. +func (s *Scheduler) Swap(i, j int) { + s.jobsMutex.Lock() + defer s.jobsMutex.Unlock() + s.jobs[i], s.jobs[j] = s.jobs[j], s.jobs[i] +} + +// Less compares the next run of jobs based on their index. +// Returns true if the second job is after the first. +func (s *Scheduler) Less(first, second int) bool { + return s.Jobs()[second].NextRun().Unix() >= s.Jobs()[first].NextRun().Unix() +} + +// ChangeLocation changes the default time location +func (s *Scheduler) ChangeLocation(newLocation *time.Location) { + s.locationMutex.Lock() + defer s.locationMutex.Unlock() + s.location = newLocation +} + +// Location provides the current location set on the scheduler +func (s *Scheduler) Location() *time.Location { + s.locationMutex.RLock() + defer s.locationMutex.RUnlock() + return s.location +} + +type nextRun struct { + duration time.Duration + dateTime time.Time +} + +// scheduleNextRun Compute the instant when this Job should run next +func (s *Scheduler) scheduleNextRun(job *Job) (bool, nextRun) { + now := s.now() + if !s.jobPresent(job) { + return false, nextRun{} + } + + lastRun := now + + if job.neverRan() { + // Increment startAtTime to the future + if !job.startAtTime.IsZero() && job.startAtTime.Before(now) { + dur := s.durationToNextRun(job.startAtTime, job).duration + job.setStartAtTime(job.startAtTime.Add(dur)) + if job.startAtTime.Before(now) { + diff := now.Sub(job.startAtTime) + dur := s.durationToNextRun(job.startAtTime, job).duration + var count time.Duration 
+ if dur != 0 { + count = diff / dur + if diff%dur != 0 { + count++ + } + } + job.setStartAtTime(job.startAtTime.Add(dur * count)) + } + } + } else { + lastRun = job.NextRun() + } + + if !job.shouldRun() { + s.RemoveByReference(job) + return false, nextRun{} + } + + next := s.durationToNextRun(lastRun, job) + + jobNextRun := job.NextRun() + if jobNextRun.After(now) { + job.setLastRun(now) + } else { + job.setLastRun(jobNextRun) + } + + if next.dateTime.IsZero() { + next.dateTime = lastRun.Add(next.duration) + job.setNextRun(next.dateTime) + } else { + job.setNextRun(next.dateTime) + } + return true, next +} + +// durationToNextRun calculate how much time to the next run, depending on unit +func (s *Scheduler) durationToNextRun(lastRun time.Time, job *Job) nextRun { + // job can be scheduled with .StartAt() + if job.getFirstAtTime() == 0 && job.getStartAtTime().After(lastRun) { + sa := job.getStartAtTime() + if job.unit == days || job.unit == weeks || job.unit == months { + job.addAtTime( + time.Duration(sa.Hour())*time.Hour + + time.Duration(sa.Minute())*time.Minute + + time.Duration(sa.Second())*time.Second, + ) + } + return nextRun{duration: sa.Sub(s.now()), dateTime: sa} + } + + var next nextRun + switch job.getUnit() { + case milliseconds, seconds, minutes, hours: + next.duration = s.calculateDuration(job) + case days: + next = s.calculateDays(job, lastRun) + case weeks: + if len(job.scheduledWeekdays) != 0 { // weekday selected, Every().Monday(), for example + next = s.calculateWeekday(job, lastRun) + } else { + next = s.calculateWeeks(job, lastRun) + } + if next.dateTime.Before(job.getStartAtTime()) { + return s.durationToNextRun(job.getStartAtTime(), job) + } + case months: + next = s.calculateMonths(job, lastRun) + case duration: + next.duration = job.getDuration() + case crontab: + next.dateTime = job.cronSchedule.Next(lastRun) + next.duration = next.dateTime.Sub(lastRun) + } + return next +} + +func (s *Scheduler) calculateMonths(job *Job, lastRun 
time.Time) nextRun { + // Special case: the last day of the month + if len(job.daysOfTheMonth) == 1 && job.daysOfTheMonth[0] == -1 { + return calculateNextRunForLastDayOfMonth(s, job, lastRun) + } + + if len(job.daysOfTheMonth) != 0 { // calculate days to job.daysOfTheMonth + + nextRunDateMap := make(map[int]nextRun) + for _, day := range job.daysOfTheMonth { + nextRunDateMap[day] = calculateNextRunForMonth(s, job, lastRun, day) + } + + nextRunResult := nextRun{} + for _, val := range nextRunDateMap { + if nextRunResult.dateTime.IsZero() { + nextRunResult = val + } else if nextRunResult.dateTime.Sub(val.dateTime).Milliseconds() > 0 { + nextRunResult = val + } + } + + return nextRunResult + } + next := s.roundToMidnightAndAddDSTAware(lastRun, job.getFirstAtTime()).AddDate(0, job.getInterval(), 0) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func calculateNextRunForLastDayOfMonth(s *Scheduler, job *Job, lastRun time.Time) nextRun { + // Calculate the last day of the next month, by adding job.interval+1 months (i.e. the + // first day of the month after the next month), and subtracting one day, unless the + // last run occurred before the end of the month. + addMonth := job.getInterval() + atTime := job.getAtTime(lastRun) + if testDate := lastRun.AddDate(0, 0, 1); testDate.Month() != lastRun.Month() && + !s.roundToMidnightAndAddDSTAware(lastRun, atTime).After(lastRun) { + // Our last run was on the last day of this month. + addMonth++ + atTime = job.getFirstAtTime() + } + + next := time.Date(lastRun.Year(), lastRun.Month(), 1, 0, 0, 0, 0, s.Location()). + Add(atTime). + AddDate(0, addMonth, 0). 
+ AddDate(0, 0, -1) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func calculateNextRunForMonth(s *Scheduler, job *Job, lastRun time.Time, dayOfMonth int) nextRun { + atTime := job.getAtTime(lastRun) + natTime := atTime + + hours, minutes, seconds := s.deconstructDuration(atTime) + jobDay := time.Date(lastRun.Year(), lastRun.Month(), dayOfMonth, hours, minutes, seconds, 0, s.Location()) + + difference := absDuration(lastRun.Sub(jobDay)) + next := lastRun + if jobDay.Before(lastRun) { // shouldn't run this month; schedule for next interval minus day difference + next = next.AddDate(0, job.getInterval(), -0) + next = next.Add(-difference) + natTime = job.getFirstAtTime() + } else { + if job.getInterval() == 1 && !jobDay.Equal(lastRun) { // every month counts current month + next = next.AddDate(0, job.getInterval()-1, 0) + } else { // should run next month interval + next = next.AddDate(0, job.getInterval(), 0) + natTime = job.getFirstAtTime() + } + next = next.Add(difference) + } + if atTime != natTime { + next = next.Add(-atTime).Add(natTime) + } + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func (s *Scheduler) calculateWeekday(job *Job, lastRun time.Time) nextRun { + daysToWeekday := s.remainingDaysToWeekday(lastRun, job) + totalDaysDifference := s.calculateTotalDaysDifference(lastRun, daysToWeekday, job) + acTime := job.getAtTime(lastRun) + if totalDaysDifference > 0 { + acTime = job.getFirstAtTime() + } + next := s.roundToMidnightAndAddDSTAware(lastRun, acTime).AddDate(0, 0, totalDaysDifference) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func (s *Scheduler) calculateWeeks(job *Job, lastRun time.Time) nextRun { + totalDaysDifference := int(job.getInterval()) * 7 + + var next time.Time + + atTimes := job.atTimes + for _, at := range atTimes { + n := s.roundToMidnightAndAddDSTAware(lastRun, at) + if n.After(s.now()) { + next = n + break + } + } + + if next.IsZero() { + next = 
s.roundToMidnightAndAddDSTAware(lastRun, job.getFirstAtTime()).AddDate(0, 0, totalDaysDifference) + } + + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func (s *Scheduler) calculateTotalDaysDifference(lastRun time.Time, daysToWeekday int, job *Job) int { + if job.getInterval() > 1 { + // just count weeks after the first jobs were done + if job.RunCount() < len(job.Weekdays()) { + return daysToWeekday + } + if daysToWeekday > 0 { + return int(job.getInterval())*7 - (allWeekDays - daysToWeekday) + } + return int(job.getInterval()) * 7 + } + + if daysToWeekday == 0 { // today, at future time or already passed + lastRunAtTime := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), 0, 0, 0, 0, s.Location()).Add(job.getAtTime(lastRun)) + if lastRun.Before(lastRunAtTime) { + return 0 + } + return 7 + } + return daysToWeekday +} + +func (s *Scheduler) calculateDays(job *Job, lastRun time.Time) nextRun { + if job.getInterval() == 1 { + lastRunDayPlusJobAtTime := s.roundToMidnightAndAddDSTAware(lastRun, job.getAtTime(lastRun)) + + if shouldRunToday(lastRun, lastRunDayPlusJobAtTime) { + return nextRun{duration: until(lastRun, lastRunDayPlusJobAtTime), dateTime: lastRunDayPlusJobAtTime} + } + } + + nextRunAtTime := s.roundToMidnightAndAddDSTAware(lastRun, job.getFirstAtTime()).AddDate(0, 0, job.getInterval()).In(s.Location()) + return nextRun{duration: until(lastRun, nextRunAtTime), dateTime: nextRunAtTime} +} + +func until(from time.Time, until time.Time) time.Duration { + return until.Sub(from) +} + +func shouldRunToday(lastRun time.Time, atTime time.Time) bool { + return lastRun.Before(atTime) +} + +func in(scheduleWeekdays []time.Weekday, weekday time.Weekday) bool { + in := false + + for _, weekdayInSchedule := range scheduleWeekdays { + if int(weekdayInSchedule) == int(weekday) { + in = true + break + } + } + return in +} + +func (s *Scheduler) calculateDuration(job *Job) time.Duration { + interval := job.getInterval() + switch job.getUnit() { 
+ case milliseconds: + return time.Duration(interval) * time.Millisecond + case seconds: + return time.Duration(interval) * time.Second + case minutes: + return time.Duration(interval) * time.Minute + default: + return time.Duration(interval) * time.Hour + } +} + +func (s *Scheduler) remainingDaysToWeekday(lastRun time.Time, job *Job) int { + weekDays := job.Weekdays() + sort.Slice(weekDays, func(i, j int) bool { + return weekDays[i] < weekDays[j] + }) + + equals := false + lastRunWeekday := lastRun.Weekday() + index := sort.Search(len(weekDays), func(i int) bool { + b := weekDays[i] >= lastRunWeekday + if b { + equals = weekDays[i] == lastRunWeekday + } + return b + }) + // check atTime + if equals { + if s.roundToMidnightAndAddDSTAware(lastRun, job.getAtTime(lastRun)).After(lastRun) { + return 0 + } + index++ + } + + if index < len(weekDays) { + return int(weekDays[index] - lastRunWeekday) + } + + return int(weekDays[0]) + allWeekDays - int(lastRunWeekday) +} + +// absDuration returns the abs time difference +func absDuration(a time.Duration) time.Duration { + if a >= 0 { + return a + } + return -a +} + +func (s *Scheduler) deconstructDuration(d time.Duration) (hours int, minutes int, seconds int) { + hours = int(d.Seconds()) / int(time.Hour/time.Second) + minutes = (int(d.Seconds()) % int(time.Hour/time.Second)) / int(time.Minute/time.Second) + seconds = int(d.Seconds()) % int(time.Minute/time.Second) + return +} + +// roundToMidnightAndAddDSTAware truncates time to midnight and "adds" duration in a DST aware manner +func (s *Scheduler) roundToMidnightAndAddDSTAware(t time.Time, d time.Duration) time.Time { + hours, minutes, seconds := s.deconstructDuration(d) + return time.Date(t.Year(), t.Month(), t.Day(), hours, minutes, seconds, 0, s.Location()) +} + +// NextRun datetime when the next Job should run. 
+func (s *Scheduler) NextRun() (*Job, time.Time) { + if len(s.Jobs()) <= 0 { + return nil, s.now() + } + + sort.Sort(s) + + return s.Jobs()[0], s.Jobs()[0].NextRun() +} + +// EveryRandom schedules a new period Job that runs at random intervals +// between the provided lower (inclusive) and upper (inclusive) bounds. +// The default unit is Seconds(). Call a different unit in the chain +// if you would like to change that. For example, Minutes(), Hours(), etc. +func (s *Scheduler) EveryRandom(lower, upper int) *Scheduler { + job := s.getCurrentJob() + + job.setRandomInterval(lower, upper) + return s +} + +// Every schedules a new periodic Job with an interval. +// Interval can be an int, time.Duration or a string that +// parses with time.ParseDuration(). +// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". +// +// The job is run immediately, unless StartAt or At is set. +func (s *Scheduler) Every(interval interface{}) *Scheduler { + job := s.getCurrentJob() + + switch interval := interval.(type) { + case int: + job.interval = interval + if interval <= 0 { + job.error = wrapOrError(job.error, ErrInvalidInterval) + } + case time.Duration: + job.interval = 0 + job.setDuration(interval) + job.setUnit(duration) + case string: + d, err := time.ParseDuration(interval) + if err != nil { + job.error = wrapOrError(job.error, err) + } + job.setDuration(d) + job.setUnit(duration) + default: + job.error = wrapOrError(job.error, ErrInvalidIntervalType) + } + + return s +} + +func (s *Scheduler) run(job *Job) { + if !s.IsRunning() { + return + } + + job.mu.Lock() + + if job.function == nil { + job.mu.Unlock() + s.Remove(job) + return + } + + defer job.mu.Unlock() + + if job.runWithDetails { + switch len(job.parameters) { + case job.parametersLen: + job.parameters = append(job.parameters, job.copy()) + case job.parametersLen + 1: + job.parameters[job.parametersLen] = job.copy() + default: + // something is really wrong and we should never get here + job.error = 
wrapOrError(job.error, ErrInvalidFunctionParameters) + return + } + } + + s.executor.jobFunctions <- job.jobFunction.copy() +} + +func (s *Scheduler) runContinuous(job *Job) { + shouldRun, next := s.scheduleNextRun(job) + if !shouldRun { + return + } + + if !job.getStartsImmediately() { + job.setStartsImmediately(true) + } else { + s.run(job) + } + nr := next.dateTime.Sub(s.now()) + if nr < 0 { + job.setLastRun(s.now()) + shouldRun, next := s.scheduleNextRun(job) + if !shouldRun { + return + } + nr = next.dateTime.Sub(s.now()) + } + + job.setTimer(s.timer(nr, func() { + if !next.dateTime.IsZero() { + for { + n := s.now().UnixNano() - next.dateTime.UnixNano() + if n >= 0 { + break + } + select { + case <-s.executor.ctx.Done(): + case <-time.After(time.Duration(n)): + } + } + } + s.runContinuous(job) + })) +} + +// RunAll run all Jobs regardless if they are scheduled to run or not +func (s *Scheduler) RunAll() { + s.RunAllWithDelay(0) +} + +// RunAllWithDelay runs all jobs with the provided delay in between each job +func (s *Scheduler) RunAllWithDelay(d time.Duration) { + for _, job := range s.Jobs() { + s.run(job) + s.time.Sleep(d) + } +} + +// RunByTag runs all the jobs containing a specific tag +// regardless of whether they are scheduled to run or not +func (s *Scheduler) RunByTag(tag string) error { + return s.RunByTagWithDelay(tag, 0) +} + +// RunByTagWithDelay is same as RunByTag but introduces a delay between +// each job execution +func (s *Scheduler) RunByTagWithDelay(tag string, d time.Duration) error { + jobs, err := s.FindJobsByTag(tag) + if err != nil { + return err + } + for _, job := range jobs { + s.run(job) + s.time.Sleep(d) + } + return nil +} + +// Remove specific Job by function +// +// Removing a job stops that job's timer. 
However, if a job has already +// been started by the job's timer before being removed, the only way to stop +// it through gocron is to use DoWithJobDetails and access the job's Context which +// informs you when the job has been canceled. +// +// Alternatively, the job function would need to have implemented a means of +// stopping, e.g. using a context.WithCancel() passed as params to Do method. +// +// The above are based on what the underlying library suggests https://pkg.go.dev/time#Timer.Stop. +func (s *Scheduler) Remove(job interface{}) { + fName := getFunctionName(job) + j := s.findJobByTaskName(fName) + s.removeJobsUniqueTags(j) + s.removeByCondition(func(someJob *Job) bool { + return someJob.funcName == fName + }) +} + +// RemoveByReference removes specific Job by reference +func (s *Scheduler) RemoveByReference(job *Job) { + s.removeJobsUniqueTags(job) + s.removeByCondition(func(someJob *Job) bool { + job.mu.RLock() + defer job.mu.RUnlock() + return someJob == job + }) +} + +func (s *Scheduler) findJobByTaskName(name string) *Job { + for _, job := range s.Jobs() { + if job.funcName == name { + return job + } + } + return nil +} + +func (s *Scheduler) removeJobsUniqueTags(job *Job) { + if job == nil { + return + } + if s.tagsUnique && len(job.tags) > 0 { + for _, tag := range job.tags { + s.tags.Delete(tag) + } + } +} + +func (s *Scheduler) removeByCondition(shouldRemove func(*Job) bool) { + retainedJobs := make([]*Job, 0) + for _, job := range s.Jobs() { + if !shouldRemove(job) { + retainedJobs = append(retainedJobs, job) + } else { + job.stop() + } + } + s.setJobs(retainedJobs) +} + +// RemoveByTag will remove Jobs that match the given tag. +func (s *Scheduler) RemoveByTag(tag string) error { + return s.RemoveByTags(tag) +} + +// RemoveByTags will remove Jobs that match all given tags. +func (s *Scheduler) RemoveByTags(tags ...string) error { + jobs, err := s.FindJobsByTag(tags...) 
+ if err != nil { + return err + } + + for _, job := range jobs { + s.RemoveByReference(job) + } + return nil +} + +// RemoveByTagsAny will remove Jobs that match any one of the given tags. +func (s *Scheduler) RemoveByTagsAny(tags ...string) error { + var errs error + mJob := make(map[*Job]struct{}) + for _, tag := range tags { + jobs, err := s.FindJobsByTag(tag) + if err != nil { + errs = wrapOrError(errs, fmt.Errorf("%s: %s", err.Error(), tag)) + } + for _, job := range jobs { + mJob[job] = struct{}{} + } + } + + for job := range mJob { + s.RemoveByReference(job) + } + + return errs +} + +// FindJobsByTag will return a slice of Jobs that match all given tags +func (s *Scheduler) FindJobsByTag(tags ...string) ([]*Job, error) { + var jobs []*Job + +Jobs: + for _, job := range s.Jobs() { + if job.hasTags(tags...) { + jobs = append(jobs, job) + continue Jobs + } + } + + if len(jobs) > 0 { + return jobs, nil + } + return nil, ErrJobNotFoundWithTag +} + +// MonthFirstWeekday sets the job to run the first specified weekday of the month +func (s *Scheduler) MonthFirstWeekday(weekday time.Weekday) *Scheduler { + _, month, day := s.time.Now(time.UTC).Date() + + if day < 7 { + return s.Cron(fmt.Sprintf("0 0 %d %d %d", day, month, weekday)) + } + + return s.Cron(fmt.Sprintf("0 0 %d %d %d", day, month+1, weekday)) +} + +// LimitRunsTo limits the number of executions of this job to n. +// Upon reaching the limit, the job is removed from the scheduler. +func (s *Scheduler) LimitRunsTo(i int) *Scheduler { + job := s.getCurrentJob() + job.LimitRunsTo(i) + return s +} + +// SingletonMode prevents a new job from starting if the prior job has not yet +// completed its run +// +// Warning: do not use this mode if your jobs will continue to stack +// up beyond the ability of the limit workers to keep up. 
An example of +// what NOT to do: +// +// s.Every("1s").SingletonMode().Do(func() { +// // this will result in an ever-growing number of goroutines +// // blocked trying to send to the buffered channel +// time.Sleep(10 * time.Minute) +// }) +func (s *Scheduler) SingletonMode() *Scheduler { + job := s.getCurrentJob() + job.SingletonMode() + return s +} + +// SingletonModeAll prevents new jobs from starting if the prior instance of the +// particular job has not yet completed its run +// +// Warning: do not use this mode if your jobs will continue to stack +// up beyond the ability of the limit workers to keep up. An example of +// what NOT to do: +// +// s := gocron.NewScheduler(time.UTC) +// s.SingletonModeAll() +// +// s.Every("1s").Do(func() { +// // this will result in an ever-growing number of goroutines +// // blocked trying to send to the buffered channel +// time.Sleep(10 * time.Minute) +// }) +func (s *Scheduler) SingletonModeAll() { + s.singletonMode = true +} + +// TaskPresent checks if specific job's function was added to the scheduler. 
+func (s *Scheduler) TaskPresent(j interface{}) bool { + for _, job := range s.Jobs() { + if job.funcName == getFunctionName(j) { + return true + } + } + return false +} + +// To avoid the recursive read lock on s.Jobs() and this function, +// creating this new function and distributing the lock between jobPresent, _jobPresent +func (s *Scheduler) _jobPresent(j *Job, jobs []*Job) bool { + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + for _, job := range jobs { + if job == j { + return true + } + } + return false +} + +func (s *Scheduler) jobPresent(j *Job) bool { + return s._jobPresent(j, s.Jobs()) +} + +// Clear clears all Jobs from this scheduler +func (s *Scheduler) Clear() { + for _, job := range s.Jobs() { + job.stop() + } + s.setJobs(make([]*Job, 0)) + // If unique tags was enabled, delete all the tags loaded in the tags sync.Map + if s.tagsUnique { + s.tags.Range(func(key interface{}, value interface{}) bool { + s.tags.Delete(key) + return true + }) + } +} + +// Stop stops the scheduler. This is a no-op if the scheduler is already stopped. +// It waits for all running jobs to finish before returning, so it is safe to assume that running jobs will finish when calling this. 
+func (s *Scheduler) Stop() { + if s.IsRunning() { + s.stop() + } +} + +func (s *Scheduler) stop() { + s.stopJobs(s.jobs) + s.executor.stop() + s.StopBlockingChan() + s.setRunning(false) +} + +func (s *Scheduler) stopJobs(jobs []*Job) { + for _, job := range jobs { + job.stop() + } +} + +func (s *Scheduler) doCommon(jobFun interface{}, params ...interface{}) (*Job, error) { + job := s.getCurrentJob() + s.inScheduleChain = false + + jobUnit := job.getUnit() + jobLastRun := job.LastRun() + if job.getAtTime(jobLastRun) != 0 && (jobUnit <= hours || jobUnit >= duration) { + job.error = wrapOrError(job.error, ErrAtTimeNotSupported) + } + + if len(job.scheduledWeekdays) != 0 && jobUnit != weeks { + job.error = wrapOrError(job.error, ErrWeekdayNotSupported) + } + + if job.unit != crontab && job.getInterval() == 0 { + if job.unit != duration { + job.error = wrapOrError(job.error, ErrInvalidInterval) + } + } + + if job.error != nil { + // delete the job from the scheduler as this job + // cannot be executed + s.RemoveByReference(job) + return nil, job.error + } + + typ := reflect.TypeOf(jobFun) + if typ.Kind() != reflect.Func { + // delete the job for the same reason as above + s.RemoveByReference(job) + return nil, ErrNotAFunction + } + + fname := getFunctionName(jobFun) + if job.funcName != fname { + job.function = jobFun + job.parameters = params + job.funcName = fname + } + + f := reflect.ValueOf(jobFun) + expectedParamLength := f.Type().NumIn() + if job.runWithDetails { + expectedParamLength-- + } + + if len(params) != expectedParamLength { + s.RemoveByReference(job) + job.error = wrapOrError(job.error, ErrWrongParams) + return nil, job.error + } + + if job.runWithDetails && f.Type().In(len(params)).Kind() != reflect.ValueOf(*job).Kind() { + s.RemoveByReference(job) + job.error = wrapOrError(job.error, ErrDoWithJobDetails) + return nil, job.error + } + + // we should not schedule if not running since we can't foresee how long it will take for the scheduler to start + if 
s.IsRunning() { + s.runContinuous(job) + } + + return job, nil +} + +// Do specifies the jobFunc that should be called every time the Job runs +func (s *Scheduler) Do(jobFun interface{}, params ...interface{}) (*Job, error) { + return s.doCommon(jobFun, params...) +} + +// DoWithJobDetails specifies the jobFunc that should be called every time the Job runs +// and additionally passes the details of the current job to the jobFunc. +// The last argument of the function must be a gocron.Job that will be passed by +// the scheduler when the function is called. +func (s *Scheduler) DoWithJobDetails(jobFun interface{}, params ...interface{}) (*Job, error) { + job := s.getCurrentJob() + job.runWithDetails = true + job.parametersLen = len(params) + return s.doCommon(jobFun, params...) +} + +// At schedules the Job at a specific time of day in the form "HH:MM:SS" or "HH:MM" +// or time.Time (note that only the hours, minutes, seconds and nanos are used). +func (s *Scheduler) At(i interface{}) *Scheduler { + job := s.getCurrentJob() + + switch t := i.(type) { + case string: + for _, tt := range strings.Split(t, ";") { + hour, min, sec, err := parseTime(tt) + if err != nil { + job.error = wrapOrError(job.error, err) + return s + } + // save atTime start as duration from midnight + job.addAtTime(time.Duration(hour)*time.Hour + time.Duration(min)*time.Minute + time.Duration(sec)*time.Second) + } + case time.Time: + job.addAtTime(time.Duration(t.Hour())*time.Hour + time.Duration(t.Minute())*time.Minute + time.Duration(t.Second())*time.Second + time.Duration(t.Nanosecond())*time.Nanosecond) + default: + job.error = wrapOrError(job.error, ErrUnsupportedTimeFormat) + } + job.startsImmediately = false + return s +} + +// Tag will add a tag when creating a job. 
+func (s *Scheduler) Tag(t ...string) *Scheduler { + job := s.getCurrentJob() + + if s.tagsUnique { + for _, tag := range t { + if _, ok := s.tags.Load(tag); ok { + job.error = wrapOrError(job.error, ErrTagsUnique(tag)) + return s + } + s.tags.Store(tag, struct{}{}) + } + } + + job.tags = append(job.tags, t...) + return s +} + +// GetAllTags returns all tags. +func (s *Scheduler) GetAllTags() []string { + var tags []string + for _, job := range s.Jobs() { + tags = append(tags, job.Tags()...) + } + return tags +} + +// StartAt schedules the next run of the Job. If this time is in the past, the configured interval will be used +// to calculate the next future time +func (s *Scheduler) StartAt(t time.Time) *Scheduler { + job := s.getCurrentJob() + job.setStartAtTime(t) + job.startsImmediately = false + return s +} + +// setUnit sets the unit type +func (s *Scheduler) setUnit(unit schedulingUnit) { + job := s.getCurrentJob() + currentUnit := job.getUnit() + if currentUnit == duration || currentUnit == crontab { + job.error = wrapOrError(job.error, ErrInvalidIntervalUnitsSelection) + return + } + job.setUnit(unit) +} + +// Millisecond sets the unit with seconds +func (s *Scheduler) Millisecond() *Scheduler { + return s.Milliseconds() +} + +// Milliseconds sets the unit with seconds +func (s *Scheduler) Milliseconds() *Scheduler { + s.setUnit(milliseconds) + return s +} + +// Second sets the unit with seconds +func (s *Scheduler) Second() *Scheduler { + return s.Seconds() +} + +// Seconds sets the unit with seconds +func (s *Scheduler) Seconds() *Scheduler { + s.setUnit(seconds) + return s +} + +// Minute sets the unit with minutes +func (s *Scheduler) Minute() *Scheduler { + return s.Minutes() +} + +// Minutes sets the unit with minutes +func (s *Scheduler) Minutes() *Scheduler { + s.setUnit(minutes) + return s +} + +// Hour sets the unit with hours +func (s *Scheduler) Hour() *Scheduler { + return s.Hours() +} + +// Hours sets the unit with hours +func (s *Scheduler) 
Hours() *Scheduler { + s.setUnit(hours) + return s +} + +// Day sets the unit with days +func (s *Scheduler) Day() *Scheduler { + s.setUnit(days) + return s +} + +// Days set the unit with days +func (s *Scheduler) Days() *Scheduler { + s.setUnit(days) + return s +} + +// Week sets the unit with weeks +func (s *Scheduler) Week() *Scheduler { + s.setUnit(weeks) + return s +} + +// Weeks sets the unit with weeks +func (s *Scheduler) Weeks() *Scheduler { + s.setUnit(weeks) + return s +} + +// Month sets the unit with months +func (s *Scheduler) Month(daysOfMonth ...int) *Scheduler { + return s.Months(daysOfMonth...) +} + +// MonthLastDay sets the unit with months at every last day of the month +func (s *Scheduler) MonthLastDay() *Scheduler { + return s.Months(-1) +} + +// Months sets the unit with months +// Note: Only days 1 through 28 are allowed for monthly schedules +// Note: Multiple add same days of month cannot be allowed +// Note: -1 is a special value and can only occur as single argument +func (s *Scheduler) Months(daysOfTheMonth ...int) *Scheduler { + job := s.getCurrentJob() + + if len(daysOfTheMonth) == 0 { + job.error = wrapOrError(job.error, ErrInvalidDayOfMonthEntry) + } else if len(daysOfTheMonth) == 1 { + dayOfMonth := daysOfTheMonth[0] + if dayOfMonth != -1 && (dayOfMonth < 1 || dayOfMonth > 28) { + job.error = wrapOrError(job.error, ErrInvalidDayOfMonthEntry) + } + } else { + + repeatMap := make(map[int]int) + for _, dayOfMonth := range daysOfTheMonth { + + if dayOfMonth < 1 || dayOfMonth > 28 { + job.error = wrapOrError(job.error, ErrInvalidDayOfMonthEntry) + break + } + + for _, dayOfMonthInJob := range job.daysOfTheMonth { + if dayOfMonthInJob == dayOfMonth { + job.error = wrapOrError(job.error, ErrInvalidDaysOfMonthDuplicateValue) + break + } + } + + if _, ok := repeatMap[dayOfMonth]; ok { + job.error = wrapOrError(job.error, ErrInvalidDaysOfMonthDuplicateValue) + break + } + repeatMap[dayOfMonth]++ + } + } + if job.daysOfTheMonth == nil { + 
job.daysOfTheMonth = make([]int, 0) + } + job.daysOfTheMonth = append(job.daysOfTheMonth, daysOfTheMonth...) + job.startsImmediately = false + s.setUnit(months) + return s +} + +// NOTE: If the dayOfTheMonth for the above two functions is +// more than the number of days in that month, the extra day(s) +// spill over to the next month. Similarly, if it's less than 0, +// it will go back to the month before + +// Weekday sets the scheduledWeekdays with a specifics weekdays +func (s *Scheduler) Weekday(weekDay time.Weekday) *Scheduler { + job := s.getCurrentJob() + + if in := in(job.scheduledWeekdays, weekDay); !in { + job.scheduledWeekdays = append(job.scheduledWeekdays, weekDay) + } + + job.startsImmediately = false + s.setUnit(weeks) + return s +} + +func (s *Scheduler) Midday() *Scheduler { + return s.At("12:00") +} + +// Monday sets the start day as Monday +func (s *Scheduler) Monday() *Scheduler { + return s.Weekday(time.Monday) +} + +// Tuesday sets the start day as Tuesday +func (s *Scheduler) Tuesday() *Scheduler { + return s.Weekday(time.Tuesday) +} + +// Wednesday sets the start day as Wednesday +func (s *Scheduler) Wednesday() *Scheduler { + return s.Weekday(time.Wednesday) +} + +// Thursday sets the start day as Thursday +func (s *Scheduler) Thursday() *Scheduler { + return s.Weekday(time.Thursday) +} + +// Friday sets the start day as Friday +func (s *Scheduler) Friday() *Scheduler { + return s.Weekday(time.Friday) +} + +// Saturday sets the start day as Saturday +func (s *Scheduler) Saturday() *Scheduler { + return s.Weekday(time.Saturday) +} + +// Sunday sets the start day as Sunday +func (s *Scheduler) Sunday() *Scheduler { + return s.Weekday(time.Sunday) +} + +func (s *Scheduler) getCurrentJob() *Job { + if !s.inScheduleChain { + s.jobsMutex.Lock() + s.jobs = append(s.jobs, s.newJob(0)) + s.jobsMutex.Unlock() + s.inScheduleChain = true + } + + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + + return s.jobs[len(s.jobs)-1] +} + +func (s 
*Scheduler) now() time.Time { + return s.time.Now(s.Location()) +} + +// TagsUnique forces job tags to be unique across the scheduler +// when adding tags with (s *Scheduler) Tag(). +// This does not enforce uniqueness on tags added via +// (j *Job) Tag() +func (s *Scheduler) TagsUnique() { + s.tagsUnique = true +} + +// Job puts the provided job in focus for the purpose +// of making changes to the job with the scheduler chain +// and finalized by calling Update() +func (s *Scheduler) Job(j *Job) *Scheduler { + jobs := s.Jobs() + for index, job := range jobs { + if job == j { + // the current job is always last, so put this job there + s.Swap(len(jobs)-1, index) + } + } + s.inScheduleChain = true + s.updateJob = true + return s +} + +// Update stops the job (if running) and starts it with any updates +// that were made to the job in the scheduler chain. Job() must be +// called first to put the given job in focus. +func (s *Scheduler) Update() (*Job, error) { + job := s.getCurrentJob() + + if !s.updateJob { + return job, wrapOrError(job.error, ErrUpdateCalledWithoutJob) + } + s.updateJob = false + job.stop() + job.setStartsImmediately(false) + + if job.runWithDetails { + return s.DoWithJobDetails(job.function, job.parameters...) + } + + if job.runConfig.mode == singletonMode { + job.SingletonMode() + } + + return s.Do(job.function, job.parameters...) 
+} + +func (s *Scheduler) Cron(cronExpression string) *Scheduler { + return s.cron(cronExpression, false) +} + +func (s *Scheduler) CronWithSeconds(cronExpression string) *Scheduler { + return s.cron(cronExpression, true) +} + +func (s *Scheduler) cron(cronExpression string, withSeconds bool) *Scheduler { + job := s.getCurrentJob() + + var withLocation string + if strings.HasPrefix(cronExpression, "TZ=") || strings.HasPrefix(cronExpression, "CRON_TZ=") { + withLocation = cronExpression + } else { + withLocation = fmt.Sprintf("CRON_TZ=%s %s", s.location.String(), cronExpression) + } + + var ( + cronSchedule cron.Schedule + err error + ) + + if withSeconds { + p := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor) + cronSchedule, err = p.Parse(withLocation) + } else { + cronSchedule, err = cron.ParseStandard(withLocation) + } + + if err != nil { + job.error = wrapOrError(err, ErrCronParseFailure) + } + + job.cronSchedule = cronSchedule + job.setUnit(crontab) + job.startsImmediately = false + + return s +} + +func (s *Scheduler) newJob(interval int) *Job { + return newJob(interval, !s.waitForInterval, s.singletonMode) +} + +// WaitForScheduleAll defaults the scheduler to create all +// new jobs with the WaitForSchedule option as true. +// The jobs will not start immediately but rather will +// wait until their first scheduled interval. +func (s *Scheduler) WaitForScheduleAll() { + s.waitForInterval = true +} + +// WaitForSchedule sets the job to not start immediately +// but rather wait until the first scheduled interval. +func (s *Scheduler) WaitForSchedule() *Scheduler { + job := s.getCurrentJob() + job.startsImmediately = false + return s +} + +// StartImmediately sets the job to run immediately upon +// starting the scheduler or adding the job to a running +// scheduler. This overrides the jobs start status of any +// previously called methods in the chain. 
+// +// Note: This is the default behavior of the scheduler +// for most jobs, but is useful for overriding the default +// behavior of Cron scheduled jobs which default to +// WaitForSchedule. +func (s *Scheduler) StartImmediately() *Scheduler { + job := s.getCurrentJob() + job.startsImmediately = true + return s +} + +// CustomTime takes an in a struct that implements the TimeWrapper interface +// allowing the caller to mock the time used by the scheduler. This is useful +// for tests relying on gocron. +func (s *Scheduler) CustomTime(customTimeWrapper TimeWrapper) { + s.time = customTimeWrapper +} + +// CustomTimer takes in a function that mirrors the time.AfterFunc +// This is used to mock the time.AfterFunc function used by the scheduler +// for testing long intervals in a short amount of time. +func (s *Scheduler) CustomTimer(customTimer func(d time.Duration, f func()) *time.Timer) { + s.timer = customTimer +} + +func (s *Scheduler) StopBlockingChan() { + s.startBlockingStopChanMutex.Lock() + if s.IsRunning() && s.startBlockingStopChan != nil { + close(s.startBlockingStopChan) + } + s.startBlockingStopChanMutex.Unlock() +} + +// WithDistributedLocker prevents the same job from being run more than once +// when multiple schedulers are trying to schedule the same job. +// +// NOTE - This is currently in BETA. Please provide any feedback on your usage +// and open bugs with any issues. +// +// One strategy to reduce splay in the job execution times when using +// intervals (e.g. 1s, 1m, 1h), on each scheduler instance, is to use +// StartAt with time.Now().Round(interval) to start the job at the +// next interval boundary. +// +// Another strategy is to use the Cron or CronWithSeconds methods as they +// use the same behavior described above using StartAt. 
+// +// NOTE - the Locker will NOT lock jobs using the singleton options: +// SingletonMode, or SingletonModeAll +// +// NOTE - beware of potential race conditions when running the Locker +// with SetMaxConcurrentJobs and WaitMode as jobs are not guaranteed +// to be locked when each scheduler's is below its limit and able +// to run the job. +func (s *Scheduler) WithDistributedLocker(l Locker) { + s.executor.distributedLocker = l +} + +// RegisterEventListeners accepts EventListeners and registers them for all jobs +// in the scheduler at the time this function is called. +// The event listeners are then called at the times described by each listener. +// If a new job is added, an additional call to this method, or the job specific +// version must be executed in order for the new job to trigger event listeners. +func (s *Scheduler) RegisterEventListeners(eventListeners ...EventListener) { + for _, job := range s.Jobs() { + job.RegisterEventListeners(eventListeners...) + } +} + +func (s *Scheduler) PauseJobExecution(shouldPause bool) { + s.executor.skipExecution.Store(shouldPause) +} diff --git a/vendor/github.com/go-co-op/gocron/time_helper.go b/vendor/github.com/go-co-op/gocron/time_helper.go new file mode 100644 index 000000000000..487a7a2ab1fe --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/time_helper.go @@ -0,0 +1,33 @@ +package gocron + +import "time" + +var _ TimeWrapper = (*trueTime)(nil) + +// TimeWrapper is an interface that wraps the Now, Sleep, and Unix methods of the time package. +// This allows the library and users to mock the time package for testing. 
+type TimeWrapper interface { + Now(*time.Location) time.Time + Unix(int64, int64) time.Time + Sleep(time.Duration) +} + +type trueTime struct{} + +func (t *trueTime) Now(location *time.Location) time.Time { + return time.Now().In(location) +} + +func (t *trueTime) Unix(sec int64, nsec int64) time.Time { + return time.Unix(sec, nsec) +} + +func (t *trueTime) Sleep(d time.Duration) { + time.Sleep(d) +} + +// afterFunc proxies the time.AfterFunc function. +// This allows it to be mocked for testing. +func afterFunc(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} diff --git a/vendor/github.com/robfig/cron/v3/.gitignore b/vendor/github.com/robfig/cron/v3/.gitignore new file mode 100644 index 000000000000..00268614f045 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/robfig/cron/v3/.travis.yml b/vendor/github.com/robfig/cron/v3/.travis.yml new file mode 100644 index 000000000000..4f2ee4d97338 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/robfig/cron/v3/LICENSE b/vendor/github.com/robfig/cron/v3/LICENSE new file mode 100644 index 000000000000..3a0f627ffeb5 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2012 Rob Figueiredo +All Rights Reserved. 
+ +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robfig/cron/v3/README.md b/vendor/github.com/robfig/cron/v3/README.md new file mode 100644 index 000000000000..984c537c0145 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/README.md @@ -0,0 +1,125 @@ +[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) +[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) + +# cron + +Cron V3 has been released! + +To download the specific tagged release, run: + + go get github.com/robfig/cron/v3@v3.0.0 + +Import it in your program as: + + import "github.com/robfig/cron/v3" + +It requires Go 1.11 or later due to usage of Go Modules. + +Refer to the documentation here: +http://godoc.org/github.com/robfig/cron + +The rest of this document describes the the advances in v3 and a list of +breaking changes for users that wish to upgrade from an earlier version. 
+ +## Upgrading to v3 (June 2019) + +cron v3 is a major upgrade to the library that addresses all outstanding bugs, +feature requests, and rough edges. It is based on a merge of master which +contains various fixes to issues found over the years and the v2 branch which +contains some backwards-incompatible features like the ability to remove cron +jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like +the timezone support, and fixes a number of bugs. + +New features: + +- Support for Go modules. Callers must now import this library as + `github.com/robfig/cron/v3`, instead of `gopkg.in/...` + +- Fixed bugs: + - 0f01e6b parser: fix combining of Dow and Dom (#70) + - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157) + - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144) + - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97) + - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206) + +- Standard cron spec parsing by default (first field is "minute"), with an easy + way to opt into the seconds field (quartz-compatible). Although, note that the + year field (optional in Quartz) is not supported. + +- Extensible, key/value logging via an interface that complies with + the https://github.com/go-logr/logr project. + +- The new Chain & JobWrapper types allow you to install "interceptors" to add + cross-cutting behavior like the following: + - Recover any panics from jobs + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + - Notification when jobs are completed + +It is backwards incompatible with both v1 and v2. These updates are required: + +- The v1 branch accepted an optional seconds field at the beginning of the cron + spec. This is non-standard and has led to a lot of confusion. 
The new default + parser conforms to the standard as described by [the Cron wikipedia page]. + + UPDATING: To retain the old behavior, construct your Cron with a custom + parser: + + // Seconds field, required + cron.New(cron.WithSeconds()) + + // Seconds field, optional + cron.New( + cron.WithParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)) + +- The Cron type now accepts functional options on construction rather than the + previous ad-hoc behavior modification mechanisms (setting a field, calling a setter). + + UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be + updated to provide those values on construction. + +- CRON_TZ is now the recommended way to specify the timezone of a single + schedule, which is sanctioned by the specification. The legacy "TZ=" prefix + will continue to be supported since it is unambiguous and easy to do so. + + UPDATING: No update is required. + +- By default, cron will no longer recover panics in jobs that it runs. + Recovering can be surprising (see issue #192) and seems to be at odds with + typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option + has been removed to accommodate the more general JobWrapper type. + + UPDATING: To opt into panic recovery and configure the panic logger: + + cron.New(cron.WithChain( + cron.Recover(logger), // or use cron.DefaultLogger + )) + +- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was + removed, since it is duplicative with the leveled logging. + + UPDATING: Callers should use `WithLogger` and specify a logger that does not + discard `Info` logs. 
For convenience, one is provided that wraps `*log.Logger`: + + cron.New( + cron.WithLogger(cron.VerbosePrintfLogger(logger))) + + +### Background - Cron spec format + +There are two cron spec formats in common usage: + +- The "standard" cron format, described on [the Cron wikipedia page] and used by + the cron Linux system utility. + +- The cron format used by [the Quartz Scheduler], commonly used for scheduled + jobs in Java software + +[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron +[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html + +The original version of this package included an optional "seconds" field, which +made it incompatible with both of these formats. Now, the "standard" format is +the default format accepted, and the Quartz format is opt-in. diff --git a/vendor/github.com/robfig/cron/v3/chain.go b/vendor/github.com/robfig/cron/v3/chain.go new file mode 100644 index 000000000000..9565b418e0e7 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/chain.go @@ -0,0 +1,92 @@ +package cron + +import ( + "fmt" + "runtime" + "sync" + "time" +) + +// JobWrapper decorates the given Job with some behavior. +type JobWrapper func(Job) Job + +// Chain is a sequence of JobWrappers that decorates submitted jobs with +// cross-cutting behaviors like logging or synchronization. +type Chain struct { + wrappers []JobWrapper +} + +// NewChain returns a Chain consisting of the given JobWrappers. +func NewChain(c ...JobWrapper) Chain { + return Chain{c} +} + +// Then decorates the given job with all JobWrappers in the chain. +// +// This: +// NewChain(m1, m2, m3).Then(job) +// is equivalent to: +// m1(m2(m3(job))) +func (c Chain) Then(j Job) Job { + for i := range c.wrappers { + j = c.wrappers[len(c.wrappers)-i-1](j) + } + return j +} + +// Recover panics in wrapped jobs and log them with the provided logger. 
+func Recover(logger Logger) JobWrapper { + return func(j Job) Job { + return FuncJob(func() { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err, ok := r.(error) + if !ok { + err = fmt.Errorf("%v", r) + } + logger.Error(err, "panic", "stack", "...\n"+string(buf)) + } + }() + j.Run() + }) + } +} + +// DelayIfStillRunning serializes jobs, delaying subsequent runs until the +// previous one is complete. Jobs running after a delay of more than a minute +// have the delay logged at Info. +func DelayIfStillRunning(logger Logger) JobWrapper { + return func(j Job) Job { + var mu sync.Mutex + return FuncJob(func() { + start := time.Now() + mu.Lock() + defer mu.Unlock() + if dur := time.Since(start); dur > time.Minute { + logger.Info("delay", "duration", dur) + } + j.Run() + }) + } +} + +// SkipIfStillRunning skips an invocation of the Job if a previous invocation is +// still running. It logs skips to the given logger at Info level. +func SkipIfStillRunning(logger Logger) JobWrapper { + return func(j Job) Job { + var ch = make(chan struct{}, 1) + ch <- struct{}{} + return FuncJob(func() { + select { + case v := <-ch: + j.Run() + ch <- v + default: + logger.Info("skip") + } + }) + } +} diff --git a/vendor/github.com/robfig/cron/v3/constantdelay.go b/vendor/github.com/robfig/cron/v3/constantdelay.go new file mode 100644 index 000000000000..cd6e7b1be91a --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. +type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). 
+// Any fields less than a Second are truncated. +func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. +// This rounds so that the next activation time will be on the second. +func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/vendor/github.com/robfig/cron/v3/cron.go b/vendor/github.com/robfig/cron/v3/cron.go new file mode 100644 index 000000000000..c7e917665898 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/cron.go @@ -0,0 +1,355 @@ +package cron + +import ( + "context" + "sort" + "sync" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + chain Chain + stop chan struct{} + add chan *Entry + remove chan EntryID + snapshot chan chan []Entry + running bool + logger Logger + runningMu sync.Mutex + location *time.Location + parser ScheduleParser + nextID EntryID + jobWaiter sync.WaitGroup +} + +// ScheduleParser is an interface for schedule spec parsers that return a Schedule +type ScheduleParser interface { + Parse(spec string) (Schedule, error) +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// Schedule describes a job's duty cycle. +type Schedule interface { + // Next returns the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// EntryID identifies an entry within a Cron instance +type EntryID int + +// Entry consists of a schedule and the func to execute on that schedule. 
+type Entry struct { + // ID is the cron-assigned ID of this entry, which may be used to look up a + // snapshot or remove it. + ID EntryID + + // Schedule on which this job should be run. + Schedule Schedule + + // Next time the job will run, or the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // Prev is the last time this job was run, or the zero time if never. + Prev time.Time + + // WrappedJob is the thing to run when the Schedule is activated. + WrappedJob Job + + // Job is the thing that was submitted to cron. + // It is kept around so that user code that needs to get at the job later, + // e.g. via Entries() can do so. + Job Job +} + +// Valid returns true if this is not the zero entry. +func (e Entry) Valid() bool { return e.ID != 0 } + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) + if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, modified by the given options. +// +// Available Settings +// +// Time Zone +// Description: The time zone in which schedules are interpreted +// Default: time.Local +// +// Parser +// Description: Parser converts cron spec strings into cron.Schedules. +// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron +// +// Chain +// Description: Wrap submitted jobs to customize behavior. +// Default: A chain that recovers panics and logs them to stderr. +// +// See "cron.With*" to modify the default behavior. 
+func New(opts ...Option) *Cron { + c := &Cron{ + entries: nil, + chain: NewChain(), + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan chan []Entry), + remove: make(chan EntryID), + running: false, + runningMu: sync.Mutex{}, + logger: DefaultLogger, + location: time.Local, + parser: standardParser, + } + for _, opt := range opts { + opt(c) + } + return c +} + +// FuncJob is a wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { + schedule, err := c.parser.Parse(spec) + if err != nil { + return 0, err + } + return c.Schedule(schedule, cmd), nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +// The job is wrapped with the configured Chain. +func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { + c.runningMu.Lock() + defer c.runningMu.Unlock() + c.nextID++ + entry := &Entry{ + ID: c.nextID, + Schedule: schedule, + WrappedJob: c.chain.Then(cmd), + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + } else { + c.add <- entry + } + return entry.ID +} + +// Entries returns a snapshot of the cron entries. 
+func (c *Cron) Entries() []Entry { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + replyChan := make(chan []Entry, 1) + c.snapshot <- replyChan + return <-replyChan + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Entry returns a snapshot of the given entry, or nil if it couldn't be found. +func (c *Cron) Entry(id EntryID) Entry { + for _, entry := range c.Entries() { + if id == entry.ID { + return entry + } + } + return Entry{} +} + +// Remove an entry from being run in the future. +func (c *Cron) Remove(id EntryID) { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.remove <- id + } else { + c.removeEntry(id) + } +} + +// Start the cron scheduler in its own goroutine, or no-op if already started. +func (c *Cron) Start() { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. +func (c *Cron) Run() { + c.runningMu.Lock() + if c.running { + c.runningMu.Unlock() + return + } + c.running = true + c.runningMu.Unlock() + c.run() +} + +// run the scheduler.. this is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + c.logger.Info("start") + + // Figure out the next activation times for each entry. + now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. 
+ timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + c.logger.Info("wake", "now", now) + + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + c.startJob(e.WrappedJob) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next) + + case replyChan := <-c.snapshot: + replyChan <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + c.logger.Info("stop") + return + + case id := <-c.remove: + timer.Stop() + now = c.now() + c.removeEntry(id) + c.logger.Info("removed", "entry", id) + } + + break + } + } +} + +// startJob runs the given job in a new goroutine. +func (c *Cron) startJob(j Job) { + c.jobWaiter.Add(1) + go func() { + defer c.jobWaiter.Done() + j.Run() + }() +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +// A context is returned so the caller can wait for running jobs to complete. +func (c *Cron) Stop() context.Context { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.stop <- struct{}{} + c.running = false + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + c.jobWaiter.Wait() + cancel() + }() + return ctx +} + +// entrySnapshot returns a copy of the current cron entry list. 
+func (c *Cron) entrySnapshot() []Entry { + var entries = make([]Entry, len(c.entries)) + for i, e := range c.entries { + entries[i] = *e + } + return entries +} + +func (c *Cron) removeEntry(id EntryID) { + var entries []*Entry + for _, e := range c.entries { + if e.ID != id { + entries = append(entries, e) + } + } + c.entries = entries +} diff --git a/vendor/github.com/robfig/cron/v3/doc.go b/vendor/github.com/robfig/cron/v3/doc.go new file mode 100644 index 000000000000..fa5d08b4dbb8 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/doc.go @@ -0,0 +1,231 @@ +/* +Package cron implements a cron spec parser and job runner. + +Installation + +To download the specific tagged release, run: + + go get github.com/robfig/cron/v3@v3.0.0 + +Import it in your program as: + + import "github.com/robfig/cron/v3" + +It requires Go 1.11 or later due to usage of Go Modules. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. + + c := cron.New() + c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") }) + c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") }) + c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") }) + c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") }) + c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") }) + c.Start() + .. + // Funcs are invoked in their own goroutine, asynchronously. + ... + // Funcs may also be added to a running Cron + c.AddFunc("@daily", func() { fmt.Println("Every day") }) + .. + // Inspect the cron job entries' next and previous run times. + inspect(c.Entries()) + .. + c.Stop() // Stop the scheduler (does not stop any jobs already running). + +CRON Expression Format + +A cron expression represents a set of times, using 5 space-separated fields. + + Field name | Mandatory? 
| Allowed values | Allowed special characters + ---------- | ---------- | -------------- | -------------------------- + Minutes | Yes | 0-59 | * / , - + Hours | Yes | 0-23 | * / , - + Day of month | Yes | 1-31 | * / , - ? + Month | Yes | 1-12 or JAN-DEC | * / , - + Day of week | Yes | 0-6 or SUN-SAT | * / , - ? + +Month and Day-of-week field values are case insensitive. "SUN", "Sun", and +"sun" are equally accepted. + +The specific interpretation of the format is based on the Cron Wikipedia page: +https://en.wikipedia.org/wiki/Cron + +Alternative Formats + +Alternative Cron expression formats support other fields like seconds. You can +implement that by creating a custom Parser as follows. + + cron.New( + cron.WithParser( + cron.NewParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))) + +Since adding Seconds is the most common modification to the standard cron spec, +cron provides a builtin function to do that, which is equivalent to the custom +parser you saw earlier, except that its seconds field is REQUIRED: + + cron.New(cron.WithSeconds()) + +That emulates Quartz, the most popular alternative Cron schedule format: +http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html + +Special Characters + +Asterisk ( * ) + +The asterisk indicates that the cron expression will match for all values of the +field; e.g., using an asterisk in the 5th field (month) would indicate every +month. + +Slash ( / ) + +Slashes are used to describe increments of ranges. For example 3-59/15 in the +1st field (minutes) would indicate the 3rd minute of the hour and every 15 +minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", +that is, an increment over the largest possible range of the field. The form +"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the +increment until the end of that specific range. It does not wrap around. 
+ +Comma ( , ) + +Commas are used to separate items of a list. For example, using "MON,WED,FRI" in +the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. + +Hyphen ( - ) + +Hyphens are used to define ranges. For example, 9-17 would indicate every +hour between 9am and 5pm inclusive. + +Question mark ( ? ) + +Question mark may be used instead of '*' for leaving either day-of-month or +day-of-week blank. + +Predefined schedules + +You may use one of several pre-defined schedules in place of a cron expression. + + Entry | Description | Equivalent To + ----- | ----------- | ------------- + @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * + @monthly | Run once a month, midnight, first of month | 0 0 1 * * + @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 + @daily (or @midnight) | Run once a day, midnight | 0 0 * * * + @hourly | Run once an hour, beginning of hour | 0 * * * * + +Intervals + +You may also schedule a job to execute at fixed intervals, starting at the time it's added +or cron is run. This is supported by formatting the cron spec like this: + + @every + +where "duration" is a string accepted by time.ParseDuration +(http://golang.org/pkg/time/#ParseDuration). + +For example, "@every 1h30m10s" would indicate a schedule that activates after +1 hour, 30 minutes, 10 seconds, and then every interval after that. + +Note: The interval does not take the job runtime into account. For example, +if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes, +it will have only 2 minutes of idle time between each run. + +Time zones + +By default, all interpretation and scheduling is done in the machine's local +time zone (time.Local). 
You can specify a different time zone on construction: + + cron.New( + cron.WithLocation(time.UTC)) + +Individual cron schedules may also override the time zone they are to be +interpreted in by providing an additional space-separated field at the beginning +of the cron spec, of the form "CRON_TZ=Asia/Tokyo". + +For example: + + # Runs at 6am in time.Local + cron.New().AddFunc("0 6 * * ?", ...) + + # Runs at 6am in America/New_York + nyc, _ := time.LoadLocation("America/New_York") + c := cron.New(cron.WithLocation(nyc)) + c.AddFunc("0 6 * * ?", ...) + + # Runs at 6am in Asia/Tokyo + cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) + + # Runs at 6am in Asia/Tokyo + c := cron.New(cron.WithLocation(nyc)) + c.SetLocation("America/New_York") + c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) + +The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility. + +Be aware that jobs scheduled during daylight-savings leap-ahead transitions will +not be run! + +Job Wrappers + +A Cron runner may be configured with a chain of job wrappers to add +cross-cutting functionality to all submitted jobs. For example, they may be used +to achieve the following effects: + + - Recover any panics from jobs (activated by default) + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + +Install wrappers for all jobs added to a cron using the `cron.WithChain` option: + + cron.New(cron.WithChain( + cron.SkipIfStillRunning(logger), + )) + +Install wrappers for individual jobs by explicitly wrapping them: + + job = cron.NewChain( + cron.SkipIfStillRunning(logger), + ).Then(job) + +Thread safety + +Since the Cron service runs concurrently with the calling code, some amount of +care must be taken to ensure proper synchronization. 
+ +All cron methods are designed to be correctly synchronized as long as the caller +ensures that invocations have a clear happens-before ordering between them. + +Logging + +Cron defines a Logger interface that is a subset of the one defined in +github.com/go-logr/logr. It has two logging levels (Info and Error), and +parameters are key/value pairs. This makes it possible for cron logging to plug +into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided +to wrap the standard library *log.Logger. + +For additional insight into Cron operations, verbose logging may be activated +which will record job runs, scheduling decisions, and added or removed jobs. +Activate it with a one-off logger as follows: + + cron.New( + cron.WithLogger( + cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) + + +Implementation + +Cron entries are stored in an array, sorted by their next activation time. Cron +sleeps until the next job is due to be run. + +Upon waking: + - it runs each entry that is active on that second + - it calculates the next run times for the jobs that were run + - it re-sorts the array of entries by next activation time. + - it goes to sleep until the soonest job. +*/ +package cron diff --git a/vendor/github.com/robfig/cron/v3/logger.go b/vendor/github.com/robfig/cron/v3/logger.go new file mode 100644 index 000000000000..b4efcc053567 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/logger.go @@ -0,0 +1,86 @@ +package cron + +import ( + "io/ioutil" + "log" + "os" + "strings" + "time" +) + +// DefaultLogger is used by Cron if none is specified. +var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) + +// DiscardLogger can be used by callers to discard all log messages. +var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0)) + +// Logger is the interface used in this package for logging, so that any backend +// can be plugged in. 
It is a subset of the github.com/go-logr/logr interface. +type Logger interface { + // Info logs routine messages about cron's operation. + Info(msg string, keysAndValues ...interface{}) + // Error logs an error condition. + Error(err error, msg string, keysAndValues ...interface{}) +} + +// PrintfLogger wraps a Printf-based logger (such as the standard library "log") +// into an implementation of the Logger interface which logs errors only. +func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, false} +} + +// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library +// "log") into an implementation of the Logger interface which logs everything. +func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, true} +} + +type printfLogger struct { + logger interface{ Printf(string, ...interface{}) } + logInfo bool +} + +func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) { + if pl.logInfo { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)), + append([]interface{}{msg}, keysAndValues...)...) + } +} + +func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)+2), + append([]interface{}{msg, "error", err}, keysAndValues...)...) +} + +// formatString returns a logfmt-like format string for the number of +// key/values. +func formatString(numKeysAndValues int) string { + var sb strings.Builder + sb.WriteString("%s") + if numKeysAndValues > 0 { + sb.WriteString(", ") + } + for i := 0; i < numKeysAndValues/2; i++ { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString("%v=%v") + } + return sb.String() +} + +// formatTimes formats any time.Time values as RFC3339. 
+func formatTimes(keysAndValues []interface{}) []interface{} { + var formattedArgs []interface{} + for _, arg := range keysAndValues { + if t, ok := arg.(time.Time); ok { + arg = t.Format(time.RFC3339) + } + formattedArgs = append(formattedArgs, arg) + } + return formattedArgs +} diff --git a/vendor/github.com/robfig/cron/v3/option.go b/vendor/github.com/robfig/cron/v3/option.go new file mode 100644 index 000000000000..09e4278e779e --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/option.go @@ -0,0 +1,45 @@ +package cron + +import ( + "time" +) + +// Option represents a modification to the default behavior of a Cron. +type Option func(*Cron) + +// WithLocation overrides the timezone of the cron instance. +func WithLocation(loc *time.Location) Option { + return func(c *Cron) { + c.location = loc + } +} + +// WithSeconds overrides the parser used for interpreting job schedules to +// include a seconds field as the first one. +func WithSeconds() Option { + return WithParser(NewParser( + Second | Minute | Hour | Dom | Month | Dow | Descriptor, + )) +} + +// WithParser overrides the parser used for interpreting job schedules. +func WithParser(p ScheduleParser) Option { + return func(c *Cron) { + c.parser = p + } +} + +// WithChain specifies Job wrappers to apply to all jobs added to this cron. +// Refer to the Chain* functions in this package for provided wrappers. +func WithChain(wrappers ...JobWrapper) Option { + return func(c *Cron) { + c.chain = NewChain(wrappers...) + } +} + +// WithLogger uses the provided logger. 
+func WithLogger(logger Logger) Option { + return func(c *Cron) { + c.logger = logger + } +} diff --git a/vendor/github.com/robfig/cron/v3/parser.go b/vendor/github.com/robfig/cron/v3/parser.go new file mode 100644 index 000000000000..3cf8879f7e71 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/parser.go @@ -0,0 +1,434 @@ +package cron + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Configuration options for creating a parser. Most options specify which +// fields should be included, while others enable features. If a field is not +// included the parser will assume a default value. These options do not change +// the order fields are parse in. +type ParseOption int + +const ( + Second ParseOption = 1 << iota // Seconds field, default 0 + SecondOptional // Optional seconds field, default 0 + Minute // Minutes field, default 0 + Hour // Hours field, default 0 + Dom // Day of month field, default * + Month // Month field, default * + Dow // Day of week field, default * + DowOptional // Optional day of week field, default * + Descriptor // Allow descriptors such as @monthly, @weekly, etc. +) + +var places = []ParseOption{ + Second, + Minute, + Hour, + Dom, + Month, + Dow, +} + +var defaults = []string{ + "0", + "0", + "0", + "*", + "*", + "*", +} + +// A custom Parser that can be configured. +type Parser struct { + options ParseOption +} + +// NewParser creates a Parser with custom options. +// +// It panics if more than one Optional is given, since it would be impossible to +// correctly infer which optional is provided or missing in general. 
+// +// Examples +// +// // Standard parser without descriptors +// specParser := NewParser(Minute | Hour | Dom | Month | Dow) +// sched, err := specParser.Parse("0 0 15 */3 *") +// +// // Same as above, just excludes time fields +// subsParser := NewParser(Dom | Month | Dow) +// sched, err := specParser.Parse("15 */3 *") +// +// // Same as above, just makes Dow optional +// subsParser := NewParser(Dom | Month | DowOptional) +// sched, err := specParser.Parse("15 */3") +// +func NewParser(options ParseOption) Parser { + optionals := 0 + if options&DowOptional > 0 { + optionals++ + } + if options&SecondOptional > 0 { + optionals++ + } + if optionals > 1 { + panic("multiple optionals may not be configured") + } + return Parser{options} +} + +// Parse returns a new crontab schedule representing the given spec. +// It returns a descriptive error if the spec is not valid. +// It accepts crontab specs and features configured by NewParser. +func (p Parser) Parse(spec string) (Schedule, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("empty spec string") + } + + // Extract timezone if present + var loc = time.Local + if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") { + var err error + i := strings.Index(spec, " ") + eq := strings.Index(spec, "=") + if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil { + return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err) + } + spec = strings.TrimSpace(spec[i:]) + } + + // Handle named schedules (descriptors), if configured + if strings.HasPrefix(spec, "@") { + if p.options&Descriptor == 0 { + return nil, fmt.Errorf("parser does not accept descriptors: %v", spec) + } + return parseDescriptor(spec, loc) + } + + // Split on whitespace. 
+ fields := strings.Fields(spec) + + // Validate & fill in any omitted or optional fields + var err error + fields, err = normalizeFields(fields, p.options) + if err != nil { + return nil, err + } + + field := func(field string, r bounds) uint64 { + if err != nil { + return 0 + } + var bits uint64 + bits, err = getField(field, r) + return bits + } + + var ( + second = field(fields[0], seconds) + minute = field(fields[1], minutes) + hour = field(fields[2], hours) + dayofmonth = field(fields[3], dom) + month = field(fields[4], months) + dayofweek = field(fields[5], dow) + ) + if err != nil { + return nil, err + } + + return &SpecSchedule{ + Second: second, + Minute: minute, + Hour: hour, + Dom: dayofmonth, + Month: month, + Dow: dayofweek, + Location: loc, + }, nil +} + +// normalizeFields takes a subset set of the time fields and returns the full set +// with defaults (zeroes) populated for unset fields. +// +// As part of performing this function, it also validates that the provided +// fields are compatible with the configured options. 
+func normalizeFields(fields []string, options ParseOption) ([]string, error) { + // Validate optionals & add their field to options + optionals := 0 + if options&SecondOptional > 0 { + options |= Second + optionals++ + } + if options&DowOptional > 0 { + options |= Dow + optionals++ + } + if optionals > 1 { + return nil, fmt.Errorf("multiple optionals may not be configured") + } + + // Figure out how many fields we need + max := 0 + for _, place := range places { + if options&place > 0 { + max++ + } + } + min := max - optionals + + // Validate number of fields + if count := len(fields); count < min || count > max { + if min == max { + return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields) + } + return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields) + } + + // Populate the optional field if not provided + if min < max && len(fields) == min { + switch { + case options&DowOptional > 0: + fields = append(fields, defaults[5]) // TODO: improve access to default + case options&SecondOptional > 0: + fields = append([]string{defaults[0]}, fields...) + default: + return nil, fmt.Errorf("unknown optional field") + } + } + + // Populate all fields not part of options with their defaults + n := 0 + expandedFields := make([]string, len(places)) + copy(expandedFields, defaults) + for i, place := range places { + if options&place > 0 { + expandedFields[i] = fields[n] + n++ + } + } + return expandedFields, nil +} + +var standardParser = NewParser( + Minute | Hour | Dom | Month | Dow | Descriptor, +) + +// ParseStandard returns a new crontab schedule representing the given +// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries +// representing: minute, hour, day of month, month and day of week, in that +// order. It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Standard crontab specs, e.g. "* * * * ?" +// - Descriptors, e.g. 
"@midnight", "@every 1h30m" +func ParseStandard(standardSpec string) (Schedule, error) { + return standardParser.Parse(standardSpec) +} + +// getField returns an Int with the bits set representing all of the times that +// the field represents or error parsing field value. A "field" is a comma-separated +// list of "ranges". +func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. +func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". 
+ if singleDigit { + end = r.max + } + if step > 1 { + extra = 0 + } + default: + return 0, fmt.Errorf("too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. +func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
+func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + Location: loc, + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + Location: loc, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor) +} diff --git a/vendor/github.com/robfig/cron/v3/spec.go b/vendor/github.com/robfig/cron/v3/spec.go new file mode 100644 index 000000000000..fa1e241e5fb5 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/spec.go @@ -0,0 +1,188 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. 
+type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 + + // Override location for this schedule. + Location *time.Location +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach + // + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. + // If the field doesn't match the schedule, then increment the field until it matches. + // While incrementing the field, a wrap-around brings it back to the beginning + // of the field list (since it is necessary to re-verify previous field + // values) + + // Convert the given time into the schedule's timezone, if one is specified. + // Save the original timezone so we can convert back after we find a time. + // Note that schedules without a time zone specified (time.Local) are treated + // as local to the time provided. + origLocation := t.Location() + loc := s.Location + if loc == time.Local { + loc = t.Location() + } + if s.Location != time.Local { + t = t.In(s.Location) + } + + // Start at the earliest possible time (the upcoming second). 
+ t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // This flag indicates whether a field has been incremented. + added := false + + // If no time is found within five years, return zero. + yearLimit := t.Year() + 5 + +WRAP: + if t.Year() > yearLimit { + return time.Time{} + } + + // Find the first applicable month. + // If it's this month, then do nothing. + for 1<<uint(t.Month())&s.Month == 0 { + // If we have to add a month, reset the other parts to 0. + if !added { + added = true + // Otherwise, set the date at the beginning (since the current time is irrelevant). + t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc) + } + t = t.AddDate(0, 1, 0) + + // Wrapped around. + if t.Month() == time.January { + goto WRAP + } + } + + // Now get a day in that month. + // + // NOTE: This causes issues for daylight savings regimes where midnight does + // not exist. For example: Sao Paulo has DST that transforms midnight on + // 11/3 into 1am. Handle that by noticing when the Hour ends up != 0. + for !dayMatches(s, t) { + if !added { + added = true + t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc) + } + t = t.AddDate(0, 0, 1) + // Notice if the hour is no longer midnight due to DST. + // Add an hour if it's 23, subtract an hour if it's 1. + if t.Hour() != 0 { + if t.Hour() > 12 { + t = t.Add(time.Duration(24-t.Hour()) * time.Hour) + } else { + t = t.Add(time.Duration(-t.Hour()) * time.Hour) + } + } + + if t.Day() == 1 { + goto WRAP + } + } + + for 1<<uint(t.Hour())&s.Hour == 0 { + if !added { + added = true + t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc) + } + t = t.Add(1 * time.Hour) + + if t.Hour() == 0 { + goto WRAP + } + } + + for 1<<uint(t.Minute())&s.Minute == 0 { + if !added { + added = true + t = t.Truncate(time.Minute) + } + t = t.Add(1 * time.Minute) + + if t.Minute() == 0 { + goto WRAP + } + } + + for 1<<uint(t.Second())&s.Second == 0 { + if !added { + added = true + t = t.Truncate(time.Second) + } + t = t.Add(1 * time.Second) + + if t.Second() == 0 { + goto WRAP + } + } + + return t.In(origLocation) +} + +// dayMatches returns true if the schedule's day-of-week and day-of-month +// restrictions are satisfied by the given time. +func dayMatches(s *SpecSchedule, t time.Time) bool { + var ( + domMatch bool = 1<<uint(t.Day())&s.Dom > 0 + dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0 + ) + if s.Dom&starBit > 0 || s.Dow&starBit > 0 { + return domMatch && dowMatch + } + return domMatch || dowMatch +} diff --git a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go index 07c4a4716e40..4e4c29459edd 100644 --- a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go +++ b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go @@ -1,3 +1,4 @@ +//go:build go1.12 // +build go1.12 package fmtsort @@ -7,12 +8,16 @@ import "reflect" const brokenNaNs = false func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) { - key := make([]reflect.Value, mapValue.Len()) - value := make([]reflect.Value, len(key)) + // Note: this code is arranged to not panic even in the presence + // of a concurrent map update. The runtime is responsible for + // yelling loudly if that happens. See issue 33275.
+ n := mapValue.Len() + key := make([]reflect.Value, 0, n) + value := make([]reflect.Value, 0, n) iter := mapValue.MapRange() - for i := 0; iter.Next(); i++ { - key[i] = iter.Key() - value[i] = iter.Value() + for iter.Next() { + key = append(key, iter.Key()) + value = append(value, iter.Value()) } return key, value } diff --git a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go index 8c28451a1eeb..873bf7f5e8e8 100644 --- a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go +++ b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go @@ -1,3 +1,4 @@ +//go:build !go1.12 // +build !go1.12 package fmtsort @@ -8,8 +9,8 @@ const brokenNaNs = true func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) { key := mapValue.MapKeys() - value := make([]reflect.Value, len(key)) - for i, k := range key { + value := make([]reflect.Value, 0, len(key)) + for _, k := range key { v := mapValue.MapIndex(k) if !v.IsValid() { // Note: we can't retrieve the value, probably because @@ -17,7 +18,7 @@ func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) { // add a zero value of the correct type in that case. 
v = reflect.Zero(mapValue.Type().Elem()) } - value[i] = v + value = append(value, v) } return key, value } diff --git a/vendor/modules.txt b/vendor/modules.txt index 29de8caf15fa..c416decbe294 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -12,6 +12,9 @@ github.com/MakeNowJust/heredoc # github.com/NYTimes/gziphandler v1.1.1 ## explicit; go 1.11 github.com/NYTimes/gziphandler +# github.com/adhocore/gronx v1.6.3 +## explicit; go 1.13 +github.com/adhocore/gronx # github.com/alessio/shellescape v1.4.1 ## explicit; go 1.14 github.com/alessio/shellescape @@ -87,6 +90,9 @@ github.com/fsnotify/fsnotify # github.com/fvbommel/sortorder v1.0.1 ## explicit; go 1.13 github.com/fvbommel/sortorder +# github.com/go-co-op/gocron v1.30.1 +## explicit; go 1.16 +github.com/go-co-op/gocron # github.com/go-errors/errors v1.0.1 ## explicit github.com/go-errors/errors @@ -398,8 +404,11 @@ github.com/prometheus/procfs/internal/util # github.com/rivo/uniseg v0.4.2 ## explicit; go 1.18 github.com/rivo/uniseg -# github.com/rogpeppe/go-internal v1.6.1 -## explicit; go 1.11 +# github.com/robfig/cron/v3 v3.0.1 +## explicit; go 1.12 +github.com/robfig/cron/v3 +# github.com/rogpeppe/go-internal v1.8.1 +## explicit; go 1.16 github.com/rogpeppe/go-internal/fmtsort # github.com/rs/zerolog v1.26.1 ## explicit; go 1.15